From c660dcdfcd885d95ea068cf2085ca089613983e6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 11 Feb 2023 17:59:12 -0500 Subject: [PATCH] improve ability to bulk import .ckpt and .safetensors This commit cleans up the code that did bulk imports of legacy model files. The code has been refactored, and the user is now offered the option of importing all the model files found in the directory, or selecting which ones to import. --- docs/installation/050_INSTALLING_MODELS.md | 46 ++++++++--- ldm/invoke/CLI.py | 89 +++++++++++++++------- 2 files changed, 97 insertions(+), 38 deletions(-) diff --git a/docs/installation/050_INSTALLING_MODELS.md b/docs/installation/050_INSTALLING_MODELS.md index a9b6d090e4..6fc015fecc 100644 --- a/docs/installation/050_INSTALLING_MODELS.md +++ b/docs/installation/050_INSTALLING_MODELS.md @@ -95,7 +95,7 @@ There are multiple ways to install and manage models: models files. 3. The web interface (WebUI) has a GUI for importing and managing -models. + models. ### Installation via `invokeai-configure` @@ -111,7 +111,7 @@ confirm that the files are complete. You can install a new model, including any of the community-supported ones, via the command-line client's `!import_model` command. -#### Installing `.ckpt` and `.safetensors` models +#### Installing individual `.ckpt` and `.safetensors` models If the model is already downloaded to your local disk, use `!import_model /path/to/file.ckpt` to load it. For example: @@ -136,15 +136,39 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors For this to work, the URL must not be password-protected. Otherwise you will receive a 404 error. 
-When you import a legacy model, the CLI will ask you a few questions -about the model, including what size image it was trained on (usually -512x512), what name and description you wish to use for it, what -configuration file to use for it (usually the default -`v1-inference.yaml`), whether you'd like to make this model the -default at startup time, and whether you would like to install a -custom VAE (variable autoencoder) file for the model. For recent -models, the answer to the VAE question is usually "no," but it won't -hurt to answer "yes". +When you import a legacy model, the CLI will first ask you what type +of model this is. You can indicate whether it is a model based on +Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x, +or a 1.x inpainting model. Be careful to indicate the correct model +type, or it will not load correctly. You can correct the model type +after the fact using the `!edit_model` command. + +The system will then ask you a few other questions about the model, +including what size image it was trained on (usually 512x512), what +name and description you wish to use for it, and whether you would +like to install a custom VAE (variable autoencoder) file for the +model. For recent models, the answer to the VAE question is usually +"no," but it won't hurt to answer "yes". + +After importing, the model will load. If this is successful, you will +be asked if you want to keep the model loaded in memory to start +generating immediately. You'll also be asked if you wish to make this +the default model on startup. You can change this later using +`!edit_model`. + +#### Importing a batch of `.ckpt` and `.safetensors` models from a directory + +You may also point `!import_model` to a directory containing a set of +`.ckpt` or `.safetensors` files. They will be imported _en masse_. 
+ +Example: +```console +invoke> !import_model C:/Users/fred/Downloads/civitai_models/ +``` + +You will be given the option to import all models found in the +directory, or select which ones to import. If there are subfolders +within the directory, they will be searched for models to import. #### Installing `diffusers` models diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 49c4d82a3d..8c50cbd23e 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -1,3 +1,4 @@ +import click import os import re import sys @@ -6,7 +7,7 @@ import traceback from argparse import Namespace from pathlib import Path -from typing import Optional, Union +from typing import Optional, Union, List if sys.platform == "darwin": os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" @@ -21,7 +22,6 @@ from ldm.invoke.image_util import make_grid from ldm.invoke.log import write_log from ldm.invoke.model_manager import ModelManager -import click # type: ignore import ldm.invoke import pyparsing # type: ignore @@ -592,12 +592,8 @@ def import_model(model_path: str, gen, opt, completer): models = list(Path(model_path).rglob('*.ckpt')) + list(Path(model_path).rglob('*.safetensors')) if models: - # Only the last model name will be used below. - for model in sorted(models): - - if click.confirm(f'Import {model.stem} ?', default=True): - model_name = import_ckpt_model(model, gen, opt, completer) - print() + models = import_checkpoint_list(models, gen, opt, completer) + model_name = models[0] if len(models) == 1 else None else: model_name = import_diffuser_model(Path(model_path), gen, opt, completer) @@ -614,13 +610,49 @@ def import_model(model_path: str, gen, opt, completer): print('** model failed to load. Discarding configuration entry') gen.model_manager.del_model(model_name) return - if input('Make this the default model? 
choice = input('** Directory of checkpoint/safetensors models detected. Install all or selected models? [a] ') or 'a'
[3] An inpainting model based on Stable Diffusion 1.X
gen, opt, completer, original_config_ return vae = None - if input('Replace this model\'s VAE with "stabilityai/sd-vae-ft-mse"? [n] ').strip() in ('y','Y'): + if click.confirm('Replace this model\'s VAE with "stabilityai/sd-vae-ft-mse"?', default=False): vae = dict(repo_id='stabilityai/sd-vae-ft-mse') new_config = gen.model_manager.convert_and_import( @@ -798,11 +835,10 @@ def optimize_model(model_name_or_path:str, gen, opt, completer, original_config_ return completer.update_models(gen.model_manager.list_models()) - if input(f'Load optimized model {model_name}? [y] ').strip() not in ('n','N'): + if click.confirm(f'Load optimized model {model_name}?', default=True): gen.set_model(model_name) - response = input(f'Delete the original .ckpt file at ({ckpt_path} ? [n] ') - if response.startswith(('y','Y')): + if click.confirm(f'Delete the original .ckpt file at {ckpt_path}?',default=False): ckpt_path.unlink(missing_ok=True) print(f'{ckpt_path} deleted') @@ -815,10 +851,10 @@ def del_config(model_name:str, gen, opt, completer): print(f"** Unknown model {model_name}") return - if input(f'Remove {model_name} from the list of models known to InvokeAI? [y] ').strip().startswith(('n','N')): + if not click.confirm(f'Remove {model_name} from the list of models known to InvokeAI?',default=True): return - delete_completely = input('Completely remove the model file or directory from disk? [n] ').startswith(('y','Y')) + delete_completely = click.confirm('Completely remove the model file or directory from disk?',default=False) gen.model_manager.del_model(model_name,delete_files=delete_completely) gen.model_manager.commit(opt.conf) print(f'** {model_name} deleted') @@ -847,7 +883,7 @@ def edit_model(model_name:str, gen, opt, completer): # this does the update manager.add_model(new_name, info, True) - if input('Make this the default model? 
if not click.confirm('Do you want to run invokeai-configure script to select and/or reinstall models?', default=True):