Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
adjust textual inversion training parameters according to xformers availability

- If xformers is available, default the "use xformers" checkbox to on.
- Increase the default batch size to 8 (from 3).
parent b856fac713
commit 2a8513eee0
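The change applies one pattern to both files below: probe for xformers once at import time, then derive the training defaults from the result. A minimal sketch of that pattern (XFORMERS_AVAILABLE and the 8-versus-3 choice are taken directly from the diff; is_xformers_available is the diffusers helper the commit imports):

    from diffusers.utils.import_utils import is_xformers_available

    # Probe once at import time; the defaults below key off this flag.
    XFORMERS_AVAILABLE = is_xformers_available()

    # Memory-efficient attention leaves headroom for larger batches, so the
    # default batch size rises from 3 to 8 when xformers can be used.
    DEFAULT_TRAIN_BATCH_SIZE = 8 if XFORMERS_AVAILABLE else 3
    DEFAULT_USE_XFORMERS = XFORMERS_AVAILABLE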
ldm/invoke/training/textual_inversion.py

@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple
 
 import npyscreen
+from diffusers.utils.import_utils import is_xformers_available
 from npyscreen import widget
 from omegaconf import OmegaConf
 
@@ -29,7 +30,7 @@ from ldm.invoke.training.textual_inversion_training import (
 TRAINING_DATA = "text-inversion-training-data"
 TRAINING_DIR = "text-inversion-output"
 CONF_FILE = "preferences.conf"
+XFORMERS_AVAILABLE = is_xformers_available()
 
 class textualInversionForm(npyscreen.FormMultiPageAction):
     resolutions = [512, 768, 1024]
@@ -178,7 +179,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=10000,
             step=500,
             lowest=1,
-            value=saved_args.get("max_train_steps", 3000),
+            value=saved_args.get("max_train_steps", 2500),
             scroll_exit=True,
         )
         self.train_batch_size = self.add_widget_intelligent(
@@ -187,7 +188,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=50,
             step=1,
             lowest=1,
-            value=saved_args.get("train_batch_size", 8),
+            value=saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3),
             scroll_exit=True,
         )
         self.gradient_accumulation_steps = self.add_widget_intelligent(
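The sliders above resolve their initial value the same way: a value remembered from the previous run wins, and the availability-derived default is only the fallback. A minimal sketch of that precedence, where saved_args is a hypothetical stand-in for the dict the form loads from CONF_FILE:

    saved_args = {}            # hypothetical: loaded from the preferences file
    XFORMERS_AVAILABLE = True  # stand-in for the module constant computed above

    # Previous-run value first, computed default second.
    batch_size = saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3)
    max_steps = saved_args.get("max_train_steps", 2500)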
@@ -225,7 +226,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Use xformers acceleration",
-            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
+            value=saved_args.get("enable_xformers_memory_efficient_attention", XFORMERS_AVAILABLE),
             scroll_exit=True,
         )
         self.lr_scheduler = self.add_widget_intelligent(
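For context, the checkbox value ultimately toggles diffusers' memory-efficient attention on the model being trained. The wiring is not shown in this diff; the sketch below is assumed, following the pattern of the upstream diffusers textual-inversion script this trainer derives from:

    # Assumed wiring (not part of this diff): apply the flag before training.
    def apply_xformers_setting(unet, enable_xformers_memory_efficient_attention: bool):
        if enable_xformers_memory_efficient_attention:
            if not is_xformers_available():
                raise ValueError("xformers was requested but is not installed")
            unet.enable_xformers_memory_efficient_attention()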
@@ -429,10 +430,11 @@ def do_front_end(args: Namespace):
         print("** DETAILS:")
         print(traceback.format_exc())
 
 
 def main():
     args = parse_args()
     global_set_root(args.root_dir or Globals.root)
+    print(XFORMERS_AVAILABLE,file=sys.stderr)
+    sys.exit(0)
     try:
         if args.front_end:
             do_front_end(args)
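Worth flagging: the two lines added to main() print the probe result to stderr and then call sys.exit(0) unconditionally, so the try: block below them is unreachable and the front end never launches. They read like debug scaffolding left in by accident:

    def main():
        args = parse_args()
        global_set_root(args.root_dir or Globals.root)
        print(XFORMERS_AVAILABLE, file=sys.stderr)
        sys.exit(0)  # unconditional exit: nothing below this line ever runs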
ldm/invoke/training/textual_inversion_training.py

@@ -67,7 +67,7 @@ else:
         "nearest": PIL.Image.NEAREST,
     }
 # ------------------------------------------------------------------------------
+XFORMERS_AVAILABLE = is_xformers_available
 
 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
 check_min_version("0.10.0.dev0")
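Note the contrast with the frontend file above: here the name is bound to the function object itself rather than its return value, so XFORMERS_AVAILABLE is always truthy in this module whether or not xformers is installed, and the 8 if XFORMERS_AVAILABLE else 3 default further down always comes out as 8 (assuming the diff was extracted faithfully and the parentheses were not simply lost):

    XFORMERS_AVAILABLE = is_xformers_available    # binds the function: always truthy
    XFORMERS_AVAILABLE = is_xformers_available()  # binds True/False, as the frontend does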
@@ -227,7 +227,7 @@ def parse_args():
     training_group.add_argument(
         "--train_batch_size",
         type=int,
-        default=16,
+        default=8 if XFORMERS_AVAILABLE else 3,
         help="Batch size (per device) for the training dataloader.",
     )
     training_group.add_argument("--num_train_epochs", type=int, default=100)
@@ -324,6 +324,7 @@ def parse_args():
     parser.add_argument(
         "--enable_xformers_memory_efficient_attention",
         action="store_true",
+        default=XFORMERS_AVAILABLE,
         help="Whether or not to use xformers.",
     )
 
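One consequence of pairing action="store_true" with a truthy default: when xformers is available, the option cannot be switched off from the command line, because omitting the flag and passing it both yield True. A small self-contained illustration:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        default=True,  # stand-in for XFORMERS_AVAILABLE on a machine with xformers
    )
    assert parser.parse_args([]).enable_xformers_memory_efficient_attention is True
    assert parser.parse_args(
        ["--enable_xformers_memory_efficient_attention"]
    ).enable_xformers_memory_efficient_attention is True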
@@ -536,7 +537,7 @@ def do_textual_inversion_training(
     seed: int = None,
     resolution: int = 512,
     center_crop: bool = False,
-    train_batch_size: int = 16,
+    train_batch_size: int = 4,
     num_train_epochs: int = 100,
     max_train_steps: int = 5000,
     gradient_accumulation_steps: int = 1,
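The new keyword default (4) matches neither the old value (16) nor the CLI default (8 or 3), so callers that rely on the keyword rather than parse_args() get the smaller batch. When sizing runs, the examples consumed per optimizer step are the per-device batch size times the accumulation steps:

    # Examples per optimizer step (per device), using the defaults in this hunk.
    train_batch_size = 4
    gradient_accumulation_steps = 1
    effective_batch = train_batch_size * gradient_accumulation_steps  # 4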