adjust textual inversion training parameters according to xformers availability

- If xformers is available, default the "Use xformers acceleration" checkbox to on.
- Raise the default batch size to 8 (from 3) when xformers is available.
Lincoln Stein 2023-03-23 19:49:13 -04:00
parent b856fac713
commit 2a8513eee0
2 changed files with 12 additions and 9 deletions
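
Both changes follow the same pattern: probe for xformers once at import time, then branch the defaults on the result. A minimal sketch of that pattern, using the names from the diff below (all surrounding TUI and argparse plumbing is elided):

    from diffusers.utils.import_utils import is_xformers_available

    # Probe once at import time; True only when the xformers package imports cleanly.
    XFORMERS_AVAILABLE = is_xformers_available()

    # Defaults then branch on the probe: xformers' memory-efficient attention
    # leaves room for larger batches, so the batch size default rises with it.
    train_batch_size_default = 8 if XFORMERS_AVAILABLE else 3
    use_xformers_default = XFORMERS_AVAILABLE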

ldm/invoke/training/textual_inversion.py

@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple
 import npyscreen
+from diffusers.utils.import_utils import is_xformers_available
 from npyscreen import widget
 from omegaconf import OmegaConf
@@ -29,7 +30,7 @@ from ldm.invoke.training.textual_inversion_training import (
 TRAINING_DATA = "text-inversion-training-data"
 TRAINING_DIR = "text-inversion-output"
 CONF_FILE = "preferences.conf"
+XFORMERS_AVAILABLE = is_xformers_available()

 class textualInversionForm(npyscreen.FormMultiPageAction):
     resolutions = [512, 768, 1024]
@@ -178,7 +179,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=10000,
             step=500,
             lowest=1,
-            value=saved_args.get("max_train_steps", 3000),
+            value=saved_args.get("max_train_steps", 2500),
             scroll_exit=True,
         )
         self.train_batch_size = self.add_widget_intelligent(
@@ -187,7 +188,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=50,
             step=1,
             lowest=1,
-            value=saved_args.get("train_batch_size", 8),
+            value=saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3),
             scroll_exit=True,
         )
         self.gradient_accumulation_steps = self.add_widget_intelligent(
@@ -225,7 +226,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Use xformers acceleration",
-            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
+            value=saved_args.get("enable_xformers_memory_efficient_attention", XFORMERS_AVAILABLE),
             scroll_exit=True,
         )
         self.lr_scheduler = self.add_widget_intelligent(
@@ -428,11 +429,12 @@ def do_front_end(args: Namespace):
         print(str(e))
         print("** DETAILS:")
         print(traceback.format_exc())

 def main():
     args = parse_args()
     global_set_root(args.root_dir or Globals.root)
+    print(XFORMERS_AVAILABLE,file=sys.stderr)
+    sys.exit(0)
     try:
         if args.front_end:
             do_front_end(args)
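
Note that the two lines added to main() print the probe result and then call sys.exit(0) before the try block runs, so the front end never launches; they read as leftover debug output. To check which defaults a given machine will get, the probe can be run on its own:

    from diffusers.utils.import_utils import is_xformers_available

    # True  -> batch size defaults to 8 and the xformers checkbox starts on
    # False -> batch size defaults to 3 and the checkbox starts off
    print(is_xformers_available())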

ldm/invoke/training/textual_inversion_training.py

@@ -67,7 +67,7 @@ else:
     "nearest": PIL.Image.NEAREST,
 }

 # ------------------------------------------------------------------------------
+XFORMERS_AVAILABLE = is_xformers_available()

 # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
 check_min_version("0.10.0.dev0")
@@ -227,7 +227,7 @@ def parse_args():
     training_group.add_argument(
         "--train_batch_size",
         type=int,
-        default=16,
+        default=8 if XFORMERS_AVAILABLE else 3,
         help="Batch size (per device) for the training dataloader.",
     )
     training_group.add_argument("--num_train_epochs", type=int, default=100)
@@ -324,6 +324,7 @@ def parse_args():
     parser.add_argument(
         "--enable_xformers_memory_efficient_attention",
         action="store_true",
+        default=XFORMERS_AVAILABLE,
         help="Whether or not to use xformers.",
     )
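
One side effect of pairing action="store_true" with a truthy default: when xformers is available, the flag can no longer be turned off from the command line, because the switch only ever stores True. A hypothetical overridable variant (argparse.BooleanOptionalAction, Python 3.9+, is my substitution, not what this commit uses):

    import argparse
    from diffusers.utils.import_utils import is_xformers_available

    parser = argparse.ArgumentParser()
    # BooleanOptionalAction generates paired switches:
    #   --enable_xformers_memory_efficient_attention
    #   --no-enable_xformers_memory_efficient_attention
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action=argparse.BooleanOptionalAction,
        default=is_xformers_available(),
        help="Whether or not to use xformers.",
    )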
@@ -536,7 +537,7 @@ def do_textual_inversion_training(
     seed: int = None,
     resolution: int = 512,
     center_crop: bool = False,
-    train_batch_size: int = 16,
+    train_batch_size: int = 4,
     num_train_epochs: int = 100,
     max_train_steps: int = 5000,
     gradient_accumulation_steps: int = 1,