From 2a8513eee0653493dff211681d45b78c45d72d61 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 23 Mar 2023 19:49:13 -0400
Subject: [PATCH 1/2] adjust textual inversion training parameters according
 to xformers availability

- If xformers is available, then default "use xformers" checkbox to on.
- Increase batch size to 8 (from 3).
---
 ldm/invoke/training/textual_inversion.py          | 14 ++++++++------
 ldm/invoke/training/textual_inversion_training.py |  7 ++++---
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/ldm/invoke/training/textual_inversion.py b/ldm/invoke/training/textual_inversion.py
index 2961e4d99c..f620bbf71e 100755
--- a/ldm/invoke/training/textual_inversion.py
+++ b/ldm/invoke/training/textual_inversion.py
@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple
 
 import npyscreen
+from diffusers.utils.import_utils import is_xformers_available
 from npyscreen import widget
 from omegaconf import OmegaConf
 
@@ -29,7 +30,7 @@ from ldm.invoke.training.textual_inversion_training import (
 TRAINING_DATA = "text-inversion-training-data"
 TRAINING_DIR = "text-inversion-output"
 CONF_FILE = "preferences.conf"
-
+XFORMERS_AVAILABLE = is_xformers_available()
 
 class textualInversionForm(npyscreen.FormMultiPageAction):
     resolutions = [512, 768, 1024]
@@ -178,7 +179,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=10000,
             step=500,
             lowest=1,
-            value=saved_args.get("max_train_steps", 3000),
+            value=saved_args.get("max_train_steps", 2500),
             scroll_exit=True,
         )
         self.train_batch_size = self.add_widget_intelligent(
@@ -187,7 +188,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=50,
             step=1,
             lowest=1,
-            value=saved_args.get("train_batch_size", 8),
+            value=saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3),
             scroll_exit=True,
         )
         self.gradient_accumulation_steps = self.add_widget_intelligent(
@@ -225,7 +226,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Use xformers acceleration",
-            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
+            value=saved_args.get("enable_xformers_memory_efficient_attention", XFORMERS_AVAILABLE),
             scroll_exit=True,
         )
         self.lr_scheduler = self.add_widget_intelligent(
@@ -428,11 +429,12 @@ def do_front_end(args: Namespace):
         print(str(e))
         print("** DETAILS:")
         print(traceback.format_exc())
-
-
+
 def main():
     args = parse_args()
     global_set_root(args.root_dir or Globals.root)
+    print(XFORMERS_AVAILABLE,file=sys.stderr)
+    sys.exit(0)
     try:
         if args.front_end:
             do_front_end(args)
diff --git a/ldm/invoke/training/textual_inversion_training.py b/ldm/invoke/training/textual_inversion_training.py
index 7794712bc1..efc0986d6c 100644
--- a/ldm/invoke/training/textual_inversion_training.py
+++ b/ldm/invoke/training/textual_inversion_training.py
@@ -67,7 +67,7 @@ else:
         "nearest": PIL.Image.NEAREST,
     }
 # ------------------------------------------------------------------------------
-
+XFORMERS_AVAILABLE = is_xformers_available()
 
 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
 check_min_version("0.10.0.dev0")
@@ -227,7 +227,7 @@ def parse_args():
     training_group.add_argument(
         "--train_batch_size",
         type=int,
-        default=16,
+        default=8 if XFORMERS_AVAILABLE else 3,
         help="Batch size (per device) for the training dataloader.",
     )
     training_group.add_argument("--num_train_epochs", type=int, default=100)
@@ -324,6 +324,7 @@ def parse_args():
     parser.add_argument(
         "--enable_xformers_memory_efficient_attention",
         action="store_true",
+        default=XFORMERS_AVAILABLE,
         help="Whether or not to use xformers.",
     )
 
@@ -536,7 +537,7 @@ def do_textual_inversion_training(
     seed: int = None,
     resolution: int = 512,
     center_crop: bool = False,
-    train_batch_size: int = 16,
+    train_batch_size: int = 4,
     num_train_epochs: int = 100,
     max_train_steps: int = 5000,
     gradient_accumulation_steps: int = 1,

From 4515d52a426ed179949452f8f35e1d4f2f75d000 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 23 Mar 2023 21:00:54 -0400
Subject: [PATCH 2/2] fix textual inversion documentation and code

This PR addresses issues raised by #3008.

1. Update documentation to indicate the correct maximum batch size for TI
   training when xformers is and isn't used.
2. Update textual inversion code so that the default for batch size is
   aware of xformer availability.
3. Add documentation for how to launch TI with distributed learning.
---
 docs/features/TEXTUAL_INVERSION.md       | 56 ++++++++++++++++++++++--
 ldm/invoke/training/textual_inversion.py |  2 -
 2 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/docs/features/TEXTUAL_INVERSION.md b/docs/features/TEXTUAL_INVERSION.md
index c0b34e3f7c..ebb09d6ee2 100644
--- a/docs/features/TEXTUAL_INVERSION.md
+++ b/docs/features/TEXTUAL_INVERSION.md
@@ -154,8 +154,11 @@ training sets will converge with 2000-3000 steps.
 
 This adjusts how many training images are processed simultaneously in
 each step. Higher values will cause the training process to run more
-quickly, but use more memory. The default size will run with GPUs with
-as little as 12 GB.
+quickly, but use more memory. The default size is selected based on
+whether you have the `xformers` memory-efficient attention library
+installed. If `xformers` is available, the batch size will be 8,
+otherwise 3. These values were chosen to allow training to run with
+GPUs with as little as 12 GB VRAM.
 
 ### Learning rate
 
@@ -172,8 +175,10 @@ learning rate to improve performance.
 
 ### Use xformers acceleration
 
-This will activate XFormers memory-efficient attention. You need to
-have XFormers installed for this to have an effect.
+This will activate XFormers memory-efficient attention, which will
+reduce memory requirements by half or more and allow you to select a
+higher batch size. You need to have XFormers installed for this to
+have an effect.
 
 ### Learning rate scheduler
 
@@ -250,6 +255,49 @@ invokeai-ti \
   --only_save_embeds
 ```
 
+## Using Distributed Training
+
+If you have multiple GPUs on one machine, or a cluster of GPU-enabled
+machines, you can activate distributed training. See the [HuggingFace
+Accelerate pages](https://huggingface.co/docs/accelerate/index) for
+full information, but the basic recipe is:
+
+1. Enter the InvokeAI developer's console command line by selecting
+option [8] from the `invoke.sh`/`invoke.bat` script.
+
+2. Configure Accelerate using `accelerate config`:
+```sh
+accelerate config
+```
+This will guide you through the configuration process, including
+specifying how many machines you will run training on and the number
+of GPUs per machine.
+
+You only need to do this once.
+
+3. Launch training from the command line using `accelerate launch`. Be sure
+that your current working directory is the InvokeAI root directory (usually
+named `invokeai` in your home directory):
+
+```sh
+accelerate launch .venv/bin/invokeai-ti \
+  --model=stable-diffusion-1.5 \
+  --resolution=512 \
+  --learnable_property=object \
+  --initializer_token='*' \
+  --placeholder_token='' \
+  --train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
+  --output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
+  --scale_lr \
+  --train_batch_size=10 \
+  --gradient_accumulation_steps=4 \
+  --max_train_steps=2000 \
+  --learning_rate=0.0005 \
+  --lr_scheduler=constant \
+  --mixed_precision=fp16 \
+  --only_save_embeds
+```
+
 ## Using Embeddings
 
 After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings//learned_embeds.bin`.
diff --git a/ldm/invoke/training/textual_inversion.py b/ldm/invoke/training/textual_inversion.py
index f620bbf71e..f1e8e2d679 100755
--- a/ldm/invoke/training/textual_inversion.py
+++ b/ldm/invoke/training/textual_inversion.py
@@ -433,8 +433,6 @@ def do_front_end(args: Namespace):
 def main():
     args = parse_args()
     global_set_root(args.root_dir or Globals.root)
-    print(XFORMERS_AVAILABLE,file=sys.stderr)
-    sys.exit(0)
     try:
         if args.front_end:
             do_front_end(args)
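
A note for reviewers: the behavioral core of both patches is the xformers-aware
default. The snippet below is not part of the patch series; it is a minimal
standalone Python sketch (assuming only that the `diffusers` package is
installed, and using an illustrative constant name of my own) that reproduces
the same probe and default selection, so you can check on a given machine which
batch size the TUI and `invokeai-ti` will now pre-select.

```python
# Standalone sketch (not part of this PR): reproduce the xformers-aware
# default introduced above. Requires only the diffusers package.
from diffusers.utils.import_utils import is_xformers_available

# Note the call: is_xformers_available() must be invoked; the bare function
# object would always evaluate as truthy.
XFORMERS_AVAILABLE = is_xformers_available()

# Defaults chosen in this PR: batch size 8 with xformers, 3 without, both
# intended to fit on GPUs with roughly 12 GB of VRAM at 512x512 resolution.
# DEFAULT_TRAIN_BATCH_SIZE is a hypothetical name used only in this sketch.
DEFAULT_TRAIN_BATCH_SIZE = 8 if XFORMERS_AVAILABLE else 3

if __name__ == "__main__":
    print(f"xformers available: {XFORMERS_AVAILABLE}")
    print(f"default --train_batch_size: {DEFAULT_TRAIN_BATCH_SIZE}")
```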