Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Commit: Merge branch 'main' into lstein-improve-ti-frontend
@@ -291,7 +291,7 @@ for more information.
 
 Visit https://huggingface.co/settings/tokens to generate a token. (Sign up for an account if needed).
 
-Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows.
+Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows.
 Alternatively press 'Enter' to skip this step and continue.
 You may re-run the configuration script again in the future if you do not wish to set the token right now.
 ''')
@@ -676,7 +676,8 @@ def download_weights(opt:dict) -> Union[str, None]:
         return
 
     access_token = authenticate()
-    HfFolder.save_token(access_token)
+    if access_token is not None:
+        HfFolder.save_token(access_token)
 
     print('\n** DOWNLOADING WEIGHTS **')
     successfully_downloaded = download_weight_datasets(models, access_token, precision=precision)
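The hunk above only persists the Hugging Face token when the user actually supplied one. A minimal sketch of that pattern, assuming huggingface_hub is installed; prompt_for_token() below is a hypothetical stand-in for the script's authenticate() helper, which can return None when the user presses Enter to skip:

# Sketch only: mirrors the guard added in the hunk above, not the full configure script.
from huggingface_hub import HfFolder

def prompt_for_token():
    # Hypothetical replacement for authenticate(); returns None if the user skips.
    token = input('Hugging Face token (press Enter to skip): ').strip()
    return token or None

access_token = prompt_for_token()
if access_token is not None:
    # Write the token only when one was given, so skipping the step saves nothing.
    HfFolder.save_token(access_token)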
@@ -121,6 +121,14 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             value=self.precisions.index(saved_args.get('mixed_precision','fp16')),
             max_height=4,
         )
+        self.num_train_epochs = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Number of training epochs:',
+            out_of=1000,
+            step=50,
+            lowest=1,
+            value=saved_args.get('num_train_epochs',100)
+        )
         self.max_train_steps = self.add_widget_intelligent(
             npyscreen.TitleSlider,
             name='Max Training Steps:',
@@ -137,6 +145,22 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             lowest=1,
             value=saved_args.get('train_batch_size',8),
         )
+        self.gradient_accumulation_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):',
+            out_of=10,
+            step=1,
+            lowest=1,
+            value=saved_args.get('gradient_accumulation_steps',4)
+        )
+        self.lr_warmup_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Warmup Steps:',
+            out_of=100,
+            step=1,
+            lowest=0,
+            value=saved_args.get('lr_warmup_steps',0),
+        )
         self.learning_rate = self.add_widget_intelligent(
             npyscreen.TitleText,
             name="Learning Rate:",
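The two hunks above add a 'Number of training epochs' slider and move the 'Gradient Accumulation Steps' and 'Warmup Steps' sliders earlier in the form (the next hunk removes them from their old position). For readers unfamiliar with npyscreen, here is a small self-contained sketch of the TitleSlider pattern these widgets use; the form and app names are illustrative, not part of this commit:

# Illustrative sketch of the npyscreen TitleSlider pattern used by the new widgets;
# the class names and default values below are made up for the demo.
import npyscreen

class SliderDemoForm(npyscreen.FormMultiPageAction):
    def create(self):
        # add_widget_intelligent() places the widget on the current page and
        # spills onto a new page when the terminal runs out of rows.
        self.gradient_accumulation_steps = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name='Gradient Accumulation Steps:',
            out_of=10,   # slider maximum
            step=1,      # increment per arrow-key press
            lowest=1,    # slider minimum
            value=4,     # default position
        )

    def on_ok(self):
        # Sliders report floats, which is why the frontend casts them to int
        # before handing them to the trainer (see the integer-coercion hunk below).
        npyscreen.notify_confirm(f'value = {int(self.gradient_accumulation_steps.value)}')
        self.parentApp.setNextForm(None)

class SliderDemoApp(npyscreen.NPSAppManaged):
    def onStart(self):
        self.addForm('MAIN', SliderDemoForm, name='TitleSlider demo')

if __name__ == '__main__':
    SliderDemoApp().run()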
@@ -160,22 +184,6 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             scroll_exit = True,
             value=self.lr_schedulers.index(saved_args.get('lr_scheduler','constant')),
         )
-        self.gradient_accumulation_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Gradient Accumulation Steps:',
-            out_of=10,
-            step=1,
-            lowest=1,
-            value=saved_args.get('gradient_accumulation_steps',4)
-        )
-        self.lr_warmup_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Warmup Steps:',
-            out_of=100,
-            step=1,
-            lowest=0,
-            value=saved_args.get('lr_warmup_steps',0),
-        )
 
     def initializer_changed(self):
         placeholder = self.placeholder_token.value
@@ -242,7 +250,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
 
         # all the integers
         for attr in ('train_batch_size','gradient_accumulation_steps',
-                     'max_train_steps','lr_warmup_steps'):
+                     'num_train_epochs','max_train_steps','lr_warmup_steps'):
            args[attr] = int(getattr(self,attr).value)
 
         # the floats (just one)
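The change above just adds num_train_epochs to the list of integer-valued fields the form collects. A toy sketch of what that loop does, using stand-in widget objects (with made-up values) instead of real npyscreen widgets:

# Toy stand-ins for npyscreen widgets, whose .value attribute is a float for sliders.
class FakeWidget:
    def __init__(self, value):
        self.value = value

class FakeForm:
    train_batch_size            = FakeWidget(8.0)
    gradient_accumulation_steps = FakeWidget(4.0)
    num_train_epochs            = FakeWidget(100.0)
    max_train_steps             = FakeWidget(3000.0)
    lr_warmup_steps             = FakeWidget(0.0)

form, args = FakeForm(), {}
for attr in ('train_batch_size', 'gradient_accumulation_steps',
             'num_train_epochs', 'max_train_steps', 'lr_warmup_steps'):
    # getattr() looks the widget up by name; int() drops the slider's float representation.
    args[attr] = int(getattr(form, attr).value)

print(args)  # {'train_batch_size': 8, 'gradient_accumulation_steps': 4, 'num_train_epochs': 100, ...}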
@@ -332,6 +340,7 @@ if __name__ == '__main__':
     save_args(args)
 
     try:
+        print(f'DEBUG: args = {args}')
         do_textual_inversion_training(**args)
         copy_to_embeddings_folder(args)
     except Exception as e: