From 714fff39ba07de1994963e83a7c126eee5053e16 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 12 Feb 2023 23:52:44 -0500 Subject: [PATCH 01/36] add new console frontend to initial model selection, and other improvements 1. The invokeai-configure script has now been refactored. The work of selecting and downloading initial models at install time is now done by a script named invokeai-initial-models (module name is ldm.invoke.config.initial_model_select) The calling arguments for invokeai-configure have not changed, so nothing should break. After initializing the root directory, the script calls invokeai-initial-models to let the user select the starting models to install. 2. invokeai-initial-models puts up a console GUI with checkboxes to indicate which models to install. It respects the --default_only and --yes arguments so that CI will continue to work. 3. User can now edit the VAE assigned to diffusers models in the CLI. 4. Fixed a bug that caused a crash during model loading when the VAE is set to None, rather than being empty. --- installer/templates/invoke.bat.in | 10 +- installer/templates/invoke.sh.in | 20 +- invokeai/configs/INITIAL_MODELS.yaml | 2 +- ldm/invoke/CLI.py | 12 + ldm/invoke/config/invokeai_configure.py | 507 ++---------------------- ldm/invoke/model_manager.py | 41 +- pyproject.toml | 1 + 7 files changed, 85 insertions(+), 508 deletions(-) diff --git a/installer/templates/invoke.bat.in b/installer/templates/invoke.bat.in index f1a5ca5ef2..838c860bea 100644 --- a/installer/templates/invoke.bat.in +++ b/installer/templates/invoke.bat.in @@ -12,8 +12,9 @@ echo 2. browser-based UI echo 3. run textual inversion training echo 4. merge models (diffusers type only) echo 5. re-run the configure script to download new models -echo 6. open the developer console -echo 7. command-line help +echo 6. download more starter models from HuggingFace +echo 7. open the developer console +echo 8. command-line help set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " if not defined restore set restore=2 IF /I "%restore%" == "1" ( @@ -32,6 +33,9 @@ IF /I "%restore%" == "1" ( echo Running invokeai-configure... python .venv\Scripts\invokeai-configure.exe %* ) ELSE IF /I "%restore%" == "6" ( + echo Running invokeai-initial-models... + python .venv\Scripts\invokeai-initial-models.exe %* +) ELSE IF /I "%restore%" == "7" ( echo Developer Console echo Python command is: where python @@ -43,7 +47,7 @@ IF /I "%restore%" == "1" ( echo ************************* echo *** Type `exit` to quit this shell and deactivate the Python virtual environment *** call cmd /k -) ELSE IF /I "%restore%" == "7" ( +) ELSE IF /I "%restore%" == "8" ( echo Displaying command line help... python .venv\Scripts\invokeai.exe --help %* pause diff --git a/installer/templates/invoke.sh.in b/installer/templates/invoke.sh.in index 5c08d7c23b..10819de7f1 100644 --- a/installer/templates/invoke.sh.in +++ b/installer/templates/invoke.sh.in @@ -30,11 +30,12 @@ if [ "$0" != "bash" ]; then echo "2. browser-based UI" echo "3. run textual inversion training" echo "4. merge models (diffusers type only)" - echo "5. open the developer console" - echo "6. re-run the configure script to download new models" - echo "7. command-line help " + echo "5. re-run the configure script to fix a broken install" + echo "6. download more starter models from HuggingFace" + echo "7. open the developer console" + echo "8. 
command-line help " echo "" - read -p "Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " yn + read -p "Please enter 1, 2, 3, 4, 5, 6, 7 or 8: [2] " yn choice=${yn:='2'} case $choice in 1) @@ -54,14 +55,17 @@ if [ "$0" != "bash" ]; then exec invokeai-merge --gui $@ ;; 5) + exec invokeai-configure --root ${INVOKEAI_ROOT} + ;; + 6) + exec invokeai-initial-models --root ${INVOKEAI_ROOT} + ;; + 7) echo "Developer Console:" file_name=$(basename "${BASH_SOURCE[0]}") bash --init-file "$file_name" ;; - 6) - exec invokeai-configure --root ${INVOKEAI_ROOT} - ;; - 7) + 8) exec invokeai --help ;; *) diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index 200c5bdf06..42a3a29538 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -68,7 +68,7 @@ trinart-characters-2_0: width: 512 height: 512 recommended: False -ft-mse-improved-autoencoder-840000: +autoencoder-840000: description: StabilityAI improved autoencoder fine-tuned for human faces. Improves legacy .ckpt models (335 MB) repo_id: stabilityai/sd-vae-ft-mse-original format: ckpt diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 32c6d816be..65a4cc7cc8 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -820,6 +820,18 @@ def edit_model(model_name:str, gen, opt, completer): completer.set_line(info[attribute]) info[attribute] = input(f'{attribute}: ') or info[attribute] + if info['format'] == 'diffusers': + vae = info.get('vae',dict(repo_id=None,path=None,subfolder=None)) + completer.set_line(vae.get('repo_id') or 'stabilityai/sd-vae-ft-mse') + vae['repo_id'] = input('External VAE repo_id: ').strip() or None + if not vae['repo_id']: + completer.set_line(vae.get('path') or '') + vae['path'] = input('Path to a local diffusers VAE model (usually none): ').strip() or None + completer.set_line(vae.get('subfolder') or '') + vae['subfolder'] = input('Name of subfolder containing the VAE model (usually none): ').strip() or None + info['vae'] = vae + + if new_name != model_name: manager.del_model(model_name) diff --git a/ldm/invoke/config/invokeai_configure.py b/ldm/invoke/config/invokeai_configure.py index ef45a023d6..08d95e1ea4 100755 --- a/ldm/invoke/config/invokeai_configure.py +++ b/ldm/invoke/config/invokeai_configure.py @@ -10,37 +10,36 @@ print("Loading Python libraries...\n") import argparse import io import os -import re import shutil import sys import traceback import warnings from pathlib import Path -from tempfile import TemporaryFile -from typing import Union from urllib import request -import requests import transformers -from diffusers import AutoencoderKL from getpass_asterisk import getpass_asterisk -from huggingface_hub import HfFolder, hf_hub_url +from huggingface_hub import HfFolder from huggingface_hub import login as hf_hub_login from omegaconf import OmegaConf -from omegaconf.dictconfig import DictConfig from tqdm import tqdm -from transformers import (AutoProcessor, CLIPSegForImageSegmentation, - CLIPTextModel, CLIPTokenizer) +from transformers import ( + AutoProcessor, + CLIPSegForImageSegmentation, + CLIPTextModel, + CLIPTokenizer, +) import invokeai.configs as configs -from ldm.invoke.devices import choose_precision, choose_torch_device -from ldm.invoke.generator.diffusers_pipeline import \ - StableDiffusionGeneratorPipeline -from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir +from ldm.invoke.config.initial_model_select import ( + download_from_hf, + select_and_download_models, + yes_or_no, +) +from ldm.invoke.globals import 
Globals, global_config_dir
 from ldm.invoke.readline import generic_completer
 
 warnings.filterwarnings("ignore")
-import torch
 
 transformers.logging.set_verbosity_error()
 
@@ -104,125 +103,6 @@ Have fun!
     print(message)
 
 
-# ---------------------------------------------
-def yes_or_no(prompt: str, default_yes=True):
-    completer.set_options(["yes", "no"])
-    completer.complete_extensions(None)  # turn off path-completion mode
-    default = "y" if default_yes else "n"
-    response = input(f"{prompt} [{default}] ") or default
-    if default_yes:
-        return response[0] not in ("n", "N")
-    else:
-        return response[0] in ("y", "Y")
-
-
-# ---------------------------------------------
-def user_wants_to_download_weights() -> str:
-    """
-    Returns one of "skip", "recommended" or "customized"
-    """
-    print(
-        """You can download and configure the weights files manually or let this
-script do it for you. Manual installation is described at:
-
-https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
-
-You may download the recommended models (about 15GB total), install all models (40 GB!!)
-select a customized set, or completely skip this step.
-"""
-    )
-    completer.set_options(["recommended", "customized", "skip"])
-    completer.complete_extensions(None)  # turn off path-completion mode
-    selection = None
-    while selection is None:
-        choice = input(
-            "Download <r>ecommended models, <a>ll models, <c>ustomized list, or <s>kip this step? [r]: "
-        )
-        if choice.startswith(("r", "R")) or len(choice) == 0:
-            selection = "recommended"
-        elif choice.startswith(("c", "C")):
-            selection = "customized"
-        elif choice.startswith(("a", "A")):
-            selection = "all"
-        elif choice.startswith(("s", "S")):
-            selection = "skip"
-    return selection
-
-
-# ---------------------------------------------
-def select_datasets(action: str):
-    done = False
-    default_datasets = default_dataset()
-    while not done:
-        datasets = dict()
-        counter = 1
-
-        if action == "customized":
-            print(
-                """
-Choose the weight file(s) you wish to download. Before downloading you
-will be given the option to view and change your selections.
-""" - ) - for ds in Datasets.keys(): - recommended = Datasets[ds].get("recommended", False) - r_str = "(recommended)" if recommended else "" - print(f'[{counter}] {ds}:\n {Datasets[ds]["description"]} {r_str}') - if yes_or_no(" Download?", default_yes=recommended): - datasets[ds] = True - counter += 1 - else: - for ds in Datasets.keys(): - if Datasets[ds].get("recommended", False): - datasets[ds] = True - counter += 1 - - print("The following weight files will be downloaded:") - counter = 1 - for ds in datasets: - dflt = "*" if ds in default_datasets else "" - print(f" [{counter}] {ds}{dflt}") - counter += 1 - print("* default") - ok_to_download = yes_or_no("Ok to download?") - if not ok_to_download: - if yes_or_no("Change your selection?"): - action = "customized" - pass - else: - done = True - else: - done = True - return datasets if ok_to_download else None - - -# --------------------------------------------- -def recommended_datasets() -> dict: - datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("recommended", False): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -def default_dataset() -> dict: - datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("default", False): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -def all_datasets() -> dict: - datasets = dict() - for ds in Datasets.keys(): - datasets[ds] = True - return datasets - - # --------------------------------------------- def HfLogin(access_token) -> str: """ @@ -242,7 +122,7 @@ def HfLogin(access_token) -> str: # -------------------------------Authenticate against Hugging Face -def authenticate(yes_to_all=False): +def save_hf_token(yes_to_all=False): print("** LICENSE AGREEMENT FOR WEIGHT FILES **") print("=" * shutil.get_terminal_size()[0]) print( @@ -356,149 +236,6 @@ You may re-run the configuration script again in the future if you do not wish t return access_token -# --------------------------------------------- -# look for legacy model.ckpt in models directory and offer to -# normalize its name -def migrate_models_ckpt(): - model_path = os.path.join(Globals.root, Model_dir, Weights_dir) - if not os.path.exists(os.path.join(model_path, "model.ckpt")): - return - new_name = Datasets["stable-diffusion-1.4"]["file"] - print('You seem to have the Stable Diffusion v4.1 "model.ckpt" already installed.') - rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?') - if rename: - print(f"model.ckpt => {new_name}") - os.replace( - os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) - ) - - -# --------------------------------------------- -def download_weight_datasets( - models: dict, access_token: str, precision: str = "float32" -): - migrate_models_ckpt() - successful = dict() - for mod in models.keys(): - print(f"Downloading {mod}:") - successful[mod] = _download_repo_or_file( - Datasets[mod], access_token, precision=precision - ) - return successful - - -def _download_repo_or_file( - mconfig: DictConfig, access_token: str, precision: str = "float32" -) -> Path: - path = None - if mconfig["format"] == "ckpt": - path = _download_ckpt_weights(mconfig, access_token) - else: - path = _download_diffusion_weights(mconfig, access_token, precision=precision) - if "vae" in mconfig and "repo_id" in mconfig["vae"]: - _download_diffusion_weights( - mconfig["vae"], access_token, precision=precision - ) - return path - - -def _download_ckpt_weights(mconfig: DictConfig, 
access_token: str) -> Path: - repo_id = mconfig["repo_id"] - filename = mconfig["file"] - cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir) - return hf_download_with_resume( - repo_id=repo_id, - model_dir=cache_dir, - model_name=filename, - access_token=access_token, - ) - - -def _download_diffusion_weights( - mconfig: DictConfig, access_token: str, precision: str = "float32" -): - repo_id = mconfig["repo_id"] - model_class = ( - StableDiffusionGeneratorPipeline - if mconfig.get("format", None) == "diffusers" - else AutoencoderKL - ) - extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}] - path = None - for extra_args in extra_arg_list: - try: - path = download_from_hf( - model_class, - repo_id, - cache_subdir="diffusers", - safety_checker=None, - **extra_args, - ) - except OSError as e: - if str(e).startswith("fp16 is not a valid"): - pass - else: - print(f"An unexpected error occurred while downloading the model: {e})") - if path: - break - return path - - -# --------------------------------------------- -def hf_download_with_resume( - repo_id: str, model_dir: str, model_name: str, access_token: str = None -) -> Path: - model_dest = Path(os.path.join(model_dir, model_name)) - os.makedirs(model_dir, exist_ok=True) - - url = hf_hub_url(repo_id, model_name) - - header = {"Authorization": f"Bearer {access_token}"} if access_token else {} - open_mode = "wb" - exist_size = 0 - - if os.path.exists(model_dest): - exist_size = os.path.getsize(model_dest) - header["Range"] = f"bytes={exist_size}-" - open_mode = "ab" - - resp = requests.get(url, headers=header, stream=True) - total = int(resp.headers.get("content-length", 0)) - - if ( - resp.status_code == 416 - ): # "range not satisfiable", which means nothing to return - print(f"* {model_name}: complete file found. Skipping.") - return model_dest - elif resp.status_code != 200: - print(f"** An error occurred during downloading {model_name}: {resp.reason}") - elif exist_size > 0: - print(f"* {model_name}: partial file found. Resuming...") - else: - print(f"* {model_name}: Downloading...") - - try: - if total < 2000: - print(f"*** ERROR DOWNLOADING {model_name}: {resp.text}") - return None - - with open(model_dest, open_mode) as file, tqdm( - desc=model_name, - initial=exist_size, - total=total + exist_size, - unit="iB", - unit_scale=True, - unit_divisor=1000, - ) as bar: - for data in resp.iter_content(chunk_size=1024): - size = file.write(data) - bar.update(size) - except Exception as e: - print(f"An error occurred while downloading {model_name}: {str(e)}") - return None - return model_dest - - # --------------------------------------------- def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"): try: @@ -517,125 +254,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th print(f"Error downloading {label} model") print(traceback.format_exc()) - -# --------------------------------------------- -def update_config_file(successfully_downloaded: dict, opt: dict): - config_file = ( - Path(opt.config_file) if opt.config_file is not None else Default_config_file - ) - - # In some cases (incomplete setup, etc), the default configs directory might be missing. - # Create it if it doesn't exist. - # this check is ignored if opt.config_file is specified - user is assumed to know what they - # are doing if they are passing a custom config file from elsewhere. 
- if config_file is Default_config_file and not config_file.parent.exists(): - configs_src = Dataset_path.parent - configs_dest = Default_config_file.parent - shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) - - yaml = new_config_file_contents(successfully_downloaded, config_file, opt) - - try: - backup = None - if os.path.exists(config_file): - print( - f"** {config_file.name} exists. Renaming to {config_file.stem}.yaml.orig" - ) - backup = config_file.with_suffix(".yaml.orig") - ## Ugh. Windows is unable to overwrite an existing backup file, raises a WinError 183 - if sys.platform == "win32" and backup.is_file(): - backup.unlink() - config_file.rename(backup) - - with TemporaryFile() as tmp: - tmp.write(Config_preamble.encode()) - tmp.write(yaml.encode()) - - with open(str(config_file.expanduser().resolve()), "wb") as new_config: - tmp.seek(0) - new_config.write(tmp.read()) - - except Exception as e: - print(f"**Error creating config file {config_file}: {str(e)} **") - if backup is not None: - print("restoring previous config file") - ## workaround, for WinError 183, see above - if sys.platform == "win32" and config_file.is_file(): - config_file.unlink() - backup.rename(config_file) - return - - print(f"Successfully created new configuration file {config_file}") - - -# --------------------------------------------- -def new_config_file_contents(successfully_downloaded: dict, config_file: Path, opt: dict) -> str: - if config_file.exists(): - conf = OmegaConf.load(str(config_file.expanduser().resolve())) - else: - conf = OmegaConf.create() - - default_selected = None - for model in successfully_downloaded: - - # a bit hacky - what we are doing here is seeing whether a checkpoint - # version of the model was previously defined, and whether the current - # model is a diffusers (indicated with a path) - if conf.get(model) and Path(successfully_downloaded[model]).is_dir(): - offer_to_delete_weights(model, conf[model], opt.yes_to_all) - - stanza = {} - mod = Datasets[model] - stanza["description"] = mod["description"] - stanza["repo_id"] = mod["repo_id"] - stanza["format"] = mod["format"] - # diffusers don't need width and height (probably .ckpt doesn't either) - # so we no longer require these in INITIAL_MODELS.yaml - if "width" in mod: - stanza["width"] = mod["width"] - if "height" in mod: - stanza["height"] = mod["height"] - if "file" in mod: - stanza["weights"] = os.path.relpath( - successfully_downloaded[model], start=Globals.root - ) - stanza["config"] = os.path.normpath(os.path.join(SD_Configs, mod["config"])) - if "vae" in mod: - if "file" in mod["vae"]: - stanza["vae"] = os.path.normpath( - os.path.join(Model_dir, Weights_dir, mod["vae"]["file"]) - ) - else: - stanza["vae"] = mod["vae"] - if mod.get("default", False): - stanza["default"] = True - default_selected = True - - conf[model] = stanza - - # if no default model was chosen, then we select the first - # one in the list - if not default_selected: - conf[list(successfully_downloaded.keys())[0]]["default"] = True - - return OmegaConf.to_yaml(conf) - -# --------------------------------------------- -def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool): - if not (weights := conf_stanza.get('weights')): - return - if re.match('/VAE/',conf_stanza.get('config')): - return - if yes_to_all or \ - yes_or_no(f'\n** The checkpoint version of {model_name} is superseded by the diffusers version. 
Delete the original file {weights}?', default_yes=False): - weights = Path(weights) - if not weights.is_absolute(): - weights = Path(Globals.root) / weights - try: - weights.unlink() - except OSError as e: - print(str(e)) - # --------------------------------------------- # this will preload the Bert tokenizer fles def download_bert(): @@ -652,22 +270,6 @@ def download_bert(): print("...success", file=sys.stderr) -# --------------------------------------------- -def download_from_hf( - model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs -): - print("", file=sys.stderr) # to prevent tqdm from overwriting - path = global_cache_dir(cache_subdir) - model = model_class.from_pretrained( - model_name, - cache_dir=path, - resume_download=True, - **kwargs, - ) - model_name = '--'.join(('models',*model_name.split('/'))) - return path / model_name if model else None - - # --------------------------------------------- def download_clip(): print("Installing CLIP model (ignore deprecation errors)...", file=sys.stderr) @@ -744,8 +346,9 @@ def download_clipseg(): def download_safety_checker(): print("Installing model for NSFW content detection...", file=sys.stderr) try: - from diffusers.pipelines.stable_diffusion.safety_checker import \ - StableDiffusionSafetyChecker + from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, + ) from transformers import AutoFeatureExtractor except ModuleNotFoundError: print("Error installing NSFW checker model:") @@ -759,52 +362,6 @@ def download_safety_checker(): print("...success", file=sys.stderr) -# ------------------------------------- -def download_weights(opt: dict) -> Union[str, None]: - precision = ( - "float32" - if opt.full_precision - else choose_precision(torch.device(choose_torch_device())) - ) - - if opt.yes_to_all: - models = default_dataset() if opt.default_only else recommended_datasets() - access_token = authenticate(opt.yes_to_all) - if len(models) > 0: - successfully_downloaded = download_weight_datasets( - models, access_token, precision=precision - ) - update_config_file(successfully_downloaded, opt) - return - - else: - choice = user_wants_to_download_weights() - - if choice == "recommended": - models = recommended_datasets() - elif choice == "all": - models = all_datasets() - elif choice == "customized": - models = select_datasets(choice) - if models is None and yes_or_no("Quit?", default_yes=False): - sys.exit(0) - else: # 'skip' - return - - access_token = authenticate() - if access_token is not None: - HfFolder.save_token(access_token) - - print("\n** DOWNLOADING WEIGHTS **") - successfully_downloaded = download_weight_datasets( - models, access_token, precision=precision - ) - - update_config_file(successfully_downloaded, opt) - if len(successfully_downloaded) < len(models): - return "some of the model weights downloads were not successful" - - # ------------------------------------- def get_root(root: str = None) -> str: if root: @@ -951,13 +508,6 @@ class ProgressBar: # ------------------------------------- def main(): parser = argparse.ArgumentParser(description="InvokeAI model downloader") - parser.add_argument( - "--interactive", - dest="interactive", - action=argparse.BooleanOptionalAction, - default=True, - help="run in interactive mode (default) - DEPRECATED", - ) parser.add_argument( "--skip-sd-weights", dest="skip_sd_weights", @@ -1005,26 +555,16 @@ def main(): # setting a global here Globals.root = os.path.expanduser(get_root(opt.root) or "") + errors = set() + 
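+    # any error messages returned by the steps below are collected in
+    # `errors` and passed to postscript(errors=errors) for display on exit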
try: # We check for to see if the runtime directory is correctly initialized. if Globals.root == "" or not os.path.exists( os.path.join(Globals.root, "invokeai.init") ): initialize_rootdir(Globals.root, opt.yes_to_all) + save_hf_token(opt.yes_to_all) - # Optimistically try to download all required assets. If any errors occur, add them and proceed anyway. - errors = set() - - if not opt.interactive: - print( - "WARNING: The --(no)-interactive argument is deprecated and will be removed. Use --skip-sd-weights." - ) - opt.skip_sd_weights = True - if opt.skip_sd_weights: - print("** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **") - else: - print("** DOWNLOADING DIFFUSION WEIGHTS **") - errors.add(download_weights(opt)) print("\n** DOWNLOADING SUPPORT MODELS **") download_bert() download_clip() @@ -1033,6 +573,13 @@ def main(): download_codeformer() download_clipseg() download_safety_checker() + + if opt.skip_sd_weights: + print("** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **") + else: + print("** DOWNLOADING DIFFUSION WEIGHTS **") + errors.add(select_and_download_models(opt)) + postscript(errors=errors) except KeyboardInterrupt: print("\nGoodbye! Come back soon.") diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 3135931eea..0f3affe5f7 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -25,19 +25,20 @@ import torch import transformers from diffusers import AutoencoderKL from diffusers import logging as dlogging -from diffusers.utils.logging import (get_verbosity, set_verbosity, - set_verbosity_error) +from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error from huggingface_hub import scan_cache_dir from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from ldm.invoke.generator.diffusers_pipeline import \ - StableDiffusionGeneratorPipeline -from ldm.invoke.globals import (Globals, global_autoscan_dir, global_cache_dir, - global_models_dir) -from ldm.util import (ask_user, download_with_progress_bar, - instantiate_from_config) +from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from ldm.invoke.globals import ( + Globals, + global_autoscan_dir, + global_cache_dir, + global_models_dir, +) +from ldm.util import ask_user, download_with_progress_bar, instantiate_from_config DEFAULT_MAX_MODELS = 2 VAE_TO_REPO_ID = { # hack, see note in convert_and_import() @@ -374,8 +375,9 @@ class ModelManager(object): print( f">> Converting legacy checkpoint {model_name} into a diffusers model..." ) - from ldm.invoke.ckpt_to_diffuser import \ - load_pipeline_from_original_stable_diffusion_ckpt + from ldm.invoke.ckpt_to_diffuser import ( + load_pipeline_from_original_stable_diffusion_ckpt, + ) if vae_config := self._choose_diffusers_vae(model_name): vae = self._load_vae(vae_config) @@ -495,8 +497,8 @@ class ModelManager(object): safety_checker=None, local_files_only=not Globals.internet_available ) if "vae" in mconfig and mconfig["vae"] is not None: - vae = self._load_vae(mconfig["vae"]) - pipeline_args.update(vae=vae) + if vae := self._load_vae(mconfig["vae"]): + pipeline_args.update(vae=vae) if not isinstance(name_or_path, Path): pipeline_args.update(cache_dir=global_cache_dir("diffusers")) if using_fp16: @@ -551,7 +553,7 @@ class ModelManager(object): f'"{model_name}" is not a known model name. 
Please check your models.yaml file' ) - if "path" in mconfig: + if "path" in mconfig and mconfig["path"] is not None: path = Path(mconfig["path"]) if not path.is_absolute(): path = Path(Globals.root, path).resolve() @@ -762,7 +764,7 @@ class ModelManager(object): model_description = model_description or "Optimized version of {model_name}" print(f">> Optimizing {model_name} (30-60s)") try: - # By passing the specified VAE too the conversion function, the autoencoder + # By passing the specified VAE to the conversion function, the autoencoder # will be built into the model rather than tacked on afterward via the config file vae_model = self._load_vae(vae) if vae else None convert_ckpt_to_diffuser( @@ -789,7 +791,9 @@ class ModelManager(object): print(">> Conversion succeeded") except Exception as e: print(f"** Conversion failed: {str(e)}") - print("** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)") + print( + "** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)" + ) return new_config @@ -1102,7 +1106,12 @@ class ModelManager(object): def _load_vae(self, vae_config) -> AutoencoderKL: vae_args = {} - name_or_path = self.model_name_or_path(vae_config) + try: + name_or_path = self.model_name_or_path(vae_config) + except Exception: + return None + if name_or_path is None: + return None using_fp16 = self.precision == "float16" vae_args.update( diff --git a/pyproject.toml b/pyproject.toml index f3dfa69b91..cfbcf0284e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,6 +108,7 @@ dependencies = [ "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main" "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging "invokeai-ti" = "ldm.invoke.training.textual_inversion:main" +"invokeai-initial-models" = "ldm.invoke.config.initial_model_select:main" [project.urls] "Homepage" = "https://invoke-ai.github.io/InvokeAI/" From 197e6b95e34ec4bd2954ae29d4be6ea6719eb2d9 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 12 Feb 2023 23:59:18 -0500 Subject: [PATCH 02/36] add missing file --- ldm/invoke/config/initial_model_select.py | 574 ++++++++++++++++++++++ 1 file changed, 574 insertions(+) create mode 100644 ldm/invoke/config/initial_model_select.py diff --git a/ldm/invoke/config/initial_model_select.py b/ldm/invoke/config/initial_model_select.py new file mode 100644 index 0000000000..76f05960ca --- /dev/null +++ b/ldm/invoke/config/initial_model_select.py @@ -0,0 +1,574 @@ +#!/usr/bin/env python +# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein) +# Before running stable-diffusion on an internet-isolated machine, +# run this script from one with internet connectivity. The +# two machines must share a common .cache directory. 
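+#
+# A hypothetical example: run `invokeai-initial-models --root_dir /shared/invokeai`
+# on the connected machine, then set INVOKEAI_ROOT=/shared/invokeai on the
+# isolated machine so that both resolve the same root (see get_root() below).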
+# +# Coauthor: Kevin Turner http://github.com/keturn +# +import argparse +import os +import re +import shutil +import sys +import traceback +import warnings +from argparse import Namespace +from pathlib import Path +from tempfile import TemporaryFile + +import npyscreen +import requests +from diffusers import AutoencoderKL +from huggingface_hub import hf_hub_url +from omegaconf import OmegaConf +from omegaconf.dictconfig import DictConfig +from tqdm import tqdm + +import invokeai.configs as configs +from ldm.invoke.devices import choose_precision, choose_torch_device +from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir +from ldm.invoke.readline import generic_completer + +warnings.filterwarnings("ignore") +import torch + +# --------------------------globals----------------------- +Model_dir = "models" +Weights_dir = "ldm/stable-diffusion-v1/" + +# the initial "configs" dir is now bundled in the `invokeai.configs` package +Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml" + +Default_config_file = Path(global_config_dir()) / "models.yaml" +SD_Configs = Path(global_config_dir()) / "stable-diffusion" + +Datasets = OmegaConf.load(Dataset_path) +completer = generic_completer(["yes", "no"]) + +Config_preamble = """# This file describes the alternative machine learning models +# available to InvokeAI script. +# +# To add a new model, follow the examples below. Each +# model requires a model config file, a weights file, +# and the width and height of the images it +# was trained on. +""" + + +# ------------------------------------- +def get_root(root: str = None) -> str: + if root: + return root + elif os.environ.get("INVOKEAI_ROOT"): + return os.environ.get("INVOKEAI_ROOT") + else: + return Globals.root + + +class addRemoveModelsForm(npyscreen.FormMultiPageAction): + def __init__(self, parentApp, name): + self.initial_models = OmegaConf.load(Dataset_path) + try: + self.existing_models = OmegaConf.load(Default_config_file) + except: + self.existing_models = dict() + self.starter_model_list = [ + x for x in list(self.initial_models.keys()) if x not in self.existing_models + ] + super().__init__(parentApp, name) + + def create(self): + starter_model_labels = [ + "%-30s %-50s" % (x, self.initial_models[x].description) + for x in self.starter_model_list + ] + recommended_models = [ + x + for x in self.starter_model_list + if self.initial_models[x].get("recommended", False) + ] + previously_installed_models = [ + x for x in list(self.initial_models.keys()) if x in self.existing_models + ] + self.add_widget_intelligent( + npyscreen.TitleText, + name="This is a starter set of Stable Diffusion models from HuggingFace", + editable=False, + color="CONTROL", + ) + self.add_widget_intelligent( + npyscreen.FixedText, + value="Select models to install:", + editable=False, + color="LABELBOLD", + ) + self.add_widget_intelligent(npyscreen.FixedText, value="", editable=False), + self.models_selected = self.add_widget_intelligent( + npyscreen.MultiSelect, + name="Install/Remove Models", + values=starter_model_labels, + value=[ + self.starter_model_list.index(x) + for x in self.initial_models + if x in recommended_models + ], + max_height=len(starter_model_labels) + 1, + scroll_exit=True, + ) + if len(previously_installed_models) > 0: + self.add_widget_intelligent( + npyscreen.TitleText, + name="These starter models are already installed. 
Use the command-line or Web UIs to manage them:", + editable=False, + color="CONTROL", + ) + for m in previously_installed_models: + self.add_widget_intelligent( + npyscreen.FixedText, + value=m, + editable=False, + relx=10, + ) + self.models_selected.editing = True + + def on_ok(self): + self.parentApp.setNextForm(None) + self.editing = False + self.parentApp.selected_models = [ + self.starter_model_list[x] for x in self.models_selected.value + ] + npyscreen.notify(f"Installing selected {self.parentApp.selected_models}") + + def on_cancel(self): + self.parentApp.setNextForm(None) + self.parentApp.selected_models = None + self.editing = False + + +class AddRemoveModelApplication(npyscreen.NPSAppManaged): + def __init__(self, saved_args=None): + super().__init__() + self.models_to_install = None + + def onStart(self): + npyscreen.setTheme(npyscreen.Themes.DefaultTheme) + self.main = self.addForm( + "MAIN", + addRemoveModelsForm, + name="Add/Remove Models", + ) + + +# --------------------------------------------- +def yes_or_no(prompt: str, default_yes=True): + completer.set_options(["yes", "no"]) + completer.complete_extensions(None) # turn off path-completion mode + default = "y" if default_yes else "n" + response = input(f"{prompt} [{default}] ") or default + if default_yes: + return response[0] not in ("n", "N") + else: + return response[0] in ("y", "Y") + + +# --------------------------------------------- +def recommended_datasets() -> dict: + datasets = dict() + for ds in Datasets.keys(): + if Datasets[ds].get("recommended", False): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +def default_dataset() -> dict: + datasets = dict() + for ds in Datasets.keys(): + if Datasets[ds].get("default", False): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +def all_datasets() -> dict: + datasets = dict() + for ds in Datasets.keys(): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +# look for legacy model.ckpt in models directory and offer to +# normalize its name +def migrate_models_ckpt(): + model_path = os.path.join(Globals.root, Model_dir, Weights_dir) + if not os.path.exists(os.path.join(model_path, "model.ckpt")): + return + new_name = Datasets["stable-diffusion-1.4"]["file"] + print('You seem to have the Stable Diffusion v4.1 "model.ckpt" already installed.') + rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?') + if rename: + print(f"model.ckpt => {new_name}") + os.replace( + os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) + ) + + +# --------------------------------------------- +def download_weight_datasets( + models: dict, access_token: str, precision: str = "float32" +): + migrate_models_ckpt() + successful = dict() + for mod in models.keys(): + print(f"Downloading {mod}:") + successful[mod] = _download_repo_or_file( + Datasets[mod], access_token, precision=precision + ) + return successful + + +def _download_repo_or_file( + mconfig: DictConfig, access_token: str, precision: str = "float32" +) -> Path: + path = None + if mconfig["format"] == "ckpt": + path = _download_ckpt_weights(mconfig, access_token) + else: + path = _download_diffusion_weights(mconfig, access_token, precision=precision) + if "vae" in mconfig and "repo_id" in mconfig["vae"]: + _download_diffusion_weights( + mconfig["vae"], access_token, precision=precision + ) + return path + + +def _download_ckpt_weights(mconfig: DictConfig, 
access_token: str) -> Path: + repo_id = mconfig["repo_id"] + filename = mconfig["file"] + cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir) + return hf_download_with_resume( + repo_id=repo_id, + model_dir=cache_dir, + model_name=filename, + access_token=access_token, + ) + + +# --------------------------------------------- +def download_from_hf( + model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs +): + print("", file=sys.stderr) # to prevent tqdm from overwriting + path = global_cache_dir(cache_subdir) + model = model_class.from_pretrained( + model_name, + cache_dir=path, + resume_download=True, + **kwargs, + ) + model_name = "--".join(("models", *model_name.split("/"))) + return path / model_name if model else None + + +def _download_diffusion_weights( + mconfig: DictConfig, access_token: str, precision: str = "float32" +): + repo_id = mconfig["repo_id"] + model_class = ( + StableDiffusionGeneratorPipeline + if mconfig.get("format", None) == "diffusers" + else AutoencoderKL + ) + extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}] + path = None + for extra_args in extra_arg_list: + try: + path = download_from_hf( + model_class, + repo_id, + cache_subdir="diffusers", + safety_checker=None, + **extra_args, + ) + except OSError as e: + if str(e).startswith("fp16 is not a valid"): + pass + else: + print(f"An unexpected error occurred while downloading the model: {e})") + if path: + break + return path + + +# --------------------------------------------- +def hf_download_with_resume( + repo_id: str, model_dir: str, model_name: str, access_token: str = None +) -> Path: + model_dest = Path(os.path.join(model_dir, model_name)) + os.makedirs(model_dir, exist_ok=True) + + url = hf_hub_url(repo_id, model_name) + + header = {"Authorization": f"Bearer {access_token}"} if access_token else {} + open_mode = "wb" + exist_size = 0 + + if os.path.exists(model_dest): + exist_size = os.path.getsize(model_dest) + header["Range"] = f"bytes={exist_size}-" + open_mode = "ab" + + resp = requests.get(url, headers=header, stream=True) + total = int(resp.headers.get("content-length", 0)) + + if ( + resp.status_code == 416 + ): # "range not satisfiable", which means nothing to return + print(f"* {model_name}: complete file found. Skipping.") + return model_dest + elif resp.status_code != 200: + print(f"** An error occurred during downloading {model_name}: {resp.reason}") + elif exist_size > 0: + print(f"* {model_name}: partial file found. Resuming...") + else: + print(f"* {model_name}: Downloading...") + + try: + if total < 2000: + print(f"*** ERROR DOWNLOADING {model_name}: {resp.text}") + return None + + with open(model_dest, open_mode) as file, tqdm( + desc=model_name, + initial=exist_size, + total=total + exist_size, + unit="iB", + unit_scale=True, + unit_divisor=1000, + ) as bar: + for data in resp.iter_content(chunk_size=1024): + size = file.write(data) + bar.update(size) + except Exception as e: + print(f"An error occurred while downloading {model_name}: {str(e)}") + return None + return model_dest + + +# --------------------------------------------- +def update_config_file(successfully_downloaded: dict, opt: dict): + config_file = ( + Path(opt.config_file) if opt.config_file is not None else Default_config_file + ) + + # In some cases (incomplete setup, etc), the default configs directory might be missing. + # Create it if it doesn't exist. 
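+    # (concretely, the shutil.copytree call below copies the bundled
+    # invokeai.configs package directory into the directory that will
+    # hold models.yaml)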
+ # this check is ignored if opt.config_file is specified - user is assumed to know what they + # are doing if they are passing a custom config file from elsewhere. + if config_file is Default_config_file and not config_file.parent.exists(): + configs_src = Dataset_path.parent + configs_dest = Default_config_file.parent + shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) + + yaml = new_config_file_contents(successfully_downloaded, config_file, opt) + + try: + backup = None + if os.path.exists(config_file): + print( + f"** {config_file.name} exists. Renaming to {config_file.stem}.yaml.orig" + ) + backup = config_file.with_suffix(".yaml.orig") + ## Ugh. Windows is unable to overwrite an existing backup file, raises a WinError 183 + if sys.platform == "win32" and backup.is_file(): + backup.unlink() + config_file.rename(backup) + + with TemporaryFile() as tmp: + tmp.write(Config_preamble.encode()) + tmp.write(yaml.encode()) + + with open(str(config_file.expanduser().resolve()), "wb") as new_config: + tmp.seek(0) + new_config.write(tmp.read()) + + except Exception as e: + print(f"**Error creating config file {config_file}: {str(e)} **") + if backup is not None: + print("restoring previous config file") + ## workaround, for WinError 183, see above + if sys.platform == "win32" and config_file.is_file(): + config_file.unlink() + backup.rename(config_file) + return + + print(f"Successfully created new configuration file {config_file}") + + +# --------------------------------------------- +def new_config_file_contents( + successfully_downloaded: dict, config_file: Path, opt: dict +) -> str: + if config_file.exists(): + conf = OmegaConf.load(str(config_file.expanduser().resolve())) + else: + conf = OmegaConf.create() + + default_selected = None + for model in successfully_downloaded: + # a bit hacky - what we are doing here is seeing whether a checkpoint + # version of the model was previously defined, and whether the current + # model is a diffusers (indicated with a path) + if conf.get(model) and Path(successfully_downloaded[model]).is_dir(): + offer_to_delete_weights(model, conf[model], opt.yes_to_all) + + stanza = {} + mod = Datasets[model] + stanza["description"] = mod["description"] + stanza["repo_id"] = mod["repo_id"] + stanza["format"] = mod["format"] + # diffusers don't need width and height (probably .ckpt doesn't either) + # so we no longer require these in INITIAL_MODELS.yaml + if "width" in mod: + stanza["width"] = mod["width"] + if "height" in mod: + stanza["height"] = mod["height"] + if "file" in mod: + stanza["weights"] = os.path.relpath( + successfully_downloaded[model], start=Globals.root + ) + stanza["config"] = os.path.normpath(os.path.join(SD_Configs, mod["config"])) + if "vae" in mod: + if "file" in mod["vae"]: + stanza["vae"] = os.path.normpath( + os.path.join(Model_dir, Weights_dir, mod["vae"]["file"]) + ) + else: + stanza["vae"] = mod["vae"] + if mod.get("default", False): + stanza["default"] = True + default_selected = True + + conf[model] = stanza + + # if no default model was chosen, then we select the first + # one in the list + if not default_selected: + conf[list(successfully_downloaded.keys())[0]]["default"] = True + + return OmegaConf.to_yaml(conf) + + +# --------------------------------------------- +def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool): + if not (weights := conf_stanza.get("weights")): + return + if re.match("/VAE/", conf_stanza.get("config")): + return + if yes_to_all or yes_or_no( + f"\n** The checkpoint 
version of {model_name} is superseded by the diffusers version. Delete the original file {weights}?", + default_yes=False, + ): + weights = Path(weights) + if not weights.is_absolute(): + weights = Path(Globals.root) / weights + try: + weights.unlink() + except OSError as e: + print(str(e)) + + +# -------------------------------------------------------- +def select_and_download_models(opt: Namespace): + if opt.default_only: + models_to_download = default_dataset() + else: + myapplication = AddRemoveModelApplication() + myapplication.run() + models_to_download = dict(map(lambda x: (x, True), myapplication.selected_models)) + + if not models_to_download: + print( + '** No models were selected. To run this program again, select "Install initial models" from the invoke script.' + ) + return + + print("** Downloading and installing the selected models.") + precision = ( + "float32" + if opt.full_precision + else choose_precision(torch.device(choose_torch_device())) + ) + successfully_downloaded = download_weight_datasets( + models=models_to_download, + access_token=None, + precision=precision, + ) + + update_config_file(successfully_downloaded, opt) + if len(successfully_downloaded) < len(models_to_download): + print("** Some of the model downloads were not successful") + + print( + "\nYour starting models were installed. To find and add more models, see https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS" + ) + + +# ------------------------------------- +def main(): + parser = argparse.ArgumentParser(description="InvokeAI model downloader") + parser.add_argument( + "--full-precision", + dest="full_precision", + action=argparse.BooleanOptionalAction, + type=bool, + default=False, + help="use 32-bit weights instead of faster 16-bit weights", + ) + parser.add_argument( + "--yes", + "-y", + dest="yes_to_all", + action="store_true", + help='answer "yes" to all prompts', + ) + parser.add_argument( + "--default_only", + action="store_true", + help="only install the default model", + ) + parser.add_argument( + "--config_file", + "-c", + dest="config_file", + type=str, + default=None, + help="path to configuration file to create", + ) + parser.add_argument( + "--root_dir", + dest="root", + type=str, + default=None, + help="path to root of install directory", + ) + opt = parser.parse_args() + + # setting a global here + Globals.root = os.path.expanduser(get_root(opt.root) or "") + + try: + select_and_download_models(opt) + except KeyboardInterrupt: + print("\nGoodbye! 
Come back soon.") + except Exception as e: + print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"') + print(traceback.format_exc()) + + +# ------------------------------------- +if __name__ == "__main__": + main() From fbbbba2fac246b3c07dda3fcad248ef72e064487 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 13 Feb 2023 07:40:00 -0500 Subject: [PATCH 03/36] correct crash on edge case --- ldm/invoke/config/initial_model_select.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldm/invoke/config/initial_model_select.py b/ldm/invoke/config/initial_model_select.py index 76f05960ca..f3b0790e4b 100644 --- a/ldm/invoke/config/initial_model_select.py +++ b/ldm/invoke/config/initial_model_select.py @@ -488,7 +488,7 @@ def select_and_download_models(opt: Namespace): else: myapplication = AddRemoveModelApplication() myapplication.run() - models_to_download = dict(map(lambda x: (x, True), myapplication.selected_models)) + models_to_download = dict(map(lambda x: (x, True), myapplication.selected_models)) if myapplication.selected_models else None if not models_to_download: print( From 7545e38655b9191bd7c7483c19c55867d8dfaf4f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 14 Feb 2023 00:02:19 -0500 Subject: [PATCH 04/36] frontend design done; functionality not hooked up yet --- installer/templates/invoke.bat.in | 2 +- installer/templates/invoke.sh.in | 2 +- ldm/invoke/config/invokeai_configure.py | 15 ++- ...itial_model_select.py => model_install.py} | 91 ++++++++++++++----- ldm/invoke/merge_diffusers.py | 2 - pyproject.toml | 2 +- 6 files changed, 84 insertions(+), 30 deletions(-) rename ldm/invoke/config/{initial_model_select.py => model_install.py} (87%) diff --git a/installer/templates/invoke.bat.in b/installer/templates/invoke.bat.in index 838c860bea..e2314af986 100644 --- a/installer/templates/invoke.bat.in +++ b/installer/templates/invoke.bat.in @@ -33,7 +33,7 @@ IF /I "%restore%" == "1" ( echo Running invokeai-configure... python .venv\Scripts\invokeai-configure.exe %* ) ELSE IF /I "%restore%" == "6" ( - echo Running invokeai-initial-models... + echo Running invokeai-model-install... python .venv\Scripts\invokeai-initial-models.exe %* ) ELSE IF /I "%restore%" == "7" ( echo Developer Console diff --git a/installer/templates/invoke.sh.in b/installer/templates/invoke.sh.in index 10819de7f1..9b9be48e9a 100644 --- a/installer/templates/invoke.sh.in +++ b/installer/templates/invoke.sh.in @@ -58,7 +58,7 @@ if [ "$0" != "bash" ]; then exec invokeai-configure --root ${INVOKEAI_ROOT} ;; 6) - exec invokeai-initial-models --root ${INVOKEAI_ROOT} + exec invokeai-model-install --root ${INVOKEAI_ROOT} ;; 7) echo "Developer Console:" diff --git a/ldm/invoke/config/invokeai_configure.py b/ldm/invoke/config/invokeai_configure.py index 08d95e1ea4..3ff555881c 100755 --- a/ldm/invoke/config/invokeai_configure.py +++ b/ldm/invoke/config/invokeai_configure.py @@ -31,10 +31,9 @@ from transformers import ( ) import invokeai.configs as configs -from ldm.invoke.config.initial_model_select import ( +from ldm.invoke.config.model_install import ( download_from_hf, select_and_download_models, - yes_or_no, ) from ldm.invoke.globals import Globals, global_config_dir from ldm.invoke.readline import generic_completer @@ -103,6 +102,18 @@ Have fun! 
print(message) +# --------------------------------------------- +def yes_or_no(prompt: str, default_yes=True): + completer.set_options(["yes", "no"]) + completer.complete_extensions(None) # turn off path-completion mode + default = "y" if default_yes else "n" + response = input(f"{prompt} [{default}] ") or default + if default_yes: + return response[0] not in ("n", "N") + else: + return response[0] in ("y", "Y") + + # --------------------------------------------- def HfLogin(access_token) -> str: """ diff --git a/ldm/invoke/config/initial_model_select.py b/ldm/invoke/config/model_install.py similarity index 87% rename from ldm/invoke/config/initial_model_select.py rename to ldm/invoke/config/model_install.py index f3b0790e4b..cb7b957e4c 100644 --- a/ldm/invoke/config/initial_model_select.py +++ b/ldm/invoke/config/model_install.py @@ -14,6 +14,7 @@ import sys import traceback import warnings from argparse import Namespace +from math import ceil from pathlib import Path from tempfile import TemporaryFile @@ -29,7 +30,6 @@ import invokeai.configs as configs from ldm.invoke.devices import choose_precision, choose_torch_device from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir -from ldm.invoke.readline import generic_completer warnings.filterwarnings("ignore") import torch @@ -45,7 +45,6 @@ Default_config_file = Path(global_config_dir()) / "models.yaml" SD_Configs = Path(global_config_dir()) / "stable-diffusion" Datasets = OmegaConf.load(Dataset_path) -completer = generic_completer(["yes", "no"]) Config_preamble = """# This file describes the alternative machine learning models # available to InvokeAI script. @@ -56,6 +55,14 @@ Config_preamble = """# This file describes the alternative machine learning mode # was trained on. """ +# ------------------------------------- +def yes_or_no(prompt: str, default_yes=True): + default = "y" if default_yes else "n" + response = input(f"{prompt} [{default}] ") or default + if default_yes: + return response[0] not in ("n", "N") + else: + return response[0] in ("y", "Y") # ------------------------------------- def get_root(root: str = None) -> str: @@ -67,7 +74,7 @@ def get_root(root: str = None) -> str: return Globals.root -class addRemoveModelsForm(npyscreen.FormMultiPageAction): +class addModelsForm(npyscreen.FormMultiPageAction): def __init__(self, parentApp, name): self.initial_models = OmegaConf.load(Dataset_path) try: @@ -118,19 +125,70 @@ class addRemoveModelsForm(npyscreen.FormMultiPageAction): scroll_exit=True, ) if len(previously_installed_models) > 0: - self.add_widget_intelligent( + title = self.add_widget_intelligent( npyscreen.TitleText, - name="These starter models are already installed. Use the command-line or Web UIs to manage them:", + name=f"These starter models are already installed. 
Use the command-line or Web UIs to manage them:", editable=False, color="CONTROL", ) - for m in previously_installed_models: + y_origin = title.rely+1 + + # use three columns + col_cnt = 3 + col_width = max([len(x) for x in previously_installed_models])+2 + rows = ceil(len(previously_installed_models)/col_cnt) + previously_installed_models = sorted(previously_installed_models) + + for i in range(0,len(previously_installed_models)): + m = previously_installed_models[i] + row = i % rows + col = i // rows self.add_widget_intelligent( npyscreen.FixedText, value=m, editable=False, - relx=10, + relx=col_cnt+col*col_width, + rely=y_origin+row ) + self.nextrely += rows + self.autoload_directory = self.add_widget_intelligent( + npyscreen.TitleFilename, + name='Import all .ckpt/.safetensors files from this directory ( to autocomplete):', + select_dir=True, + must_exist=True, + use_two_lines=False, + begin_entry_at=81, + value=os.path.expanduser('~'+'/'), + scroll_exit=True, + ) + self.autoload_onstartup = self.add_widget_intelligent( + npyscreen.Checkbox, + name='Scan this directory each time InvokeAI starts for new models to import.', + value=False, + scroll_exit=True, + ) + self.nextrely += 1 + self.add_widget_intelligent( + npyscreen.TitleText, + name='In the space below, you may cut and paste URLs, paths to .ckpt/.safetensor files, or HuggingFace diffusers repository names to import:', + editable=False, + color="CONTROL", + ) + self.model_names = self.add_widget_intelligent( + npyscreen.MultiLineEdit, + max_width=75, + max_height=16, + scroll_exit=True, + relx=18 + ) + self.autoload_onstartup = self.add_widget_intelligent( + npyscreen.TitleSelectOne, + name='Keep files in original format, or convert .ckpt/.safetensors into fast-loading diffusers models:', + values=['Original format','Convert to diffusers format'], + value=0, + scroll_exit=True, + ) + self.models_selected.editing = True def on_ok(self): @@ -146,8 +204,7 @@ class addRemoveModelsForm(npyscreen.FormMultiPageAction): self.parentApp.selected_models = None self.editing = False - -class AddRemoveModelApplication(npyscreen.NPSAppManaged): +class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, saved_args=None): super().__init__() self.models_to_install = None @@ -156,23 +213,11 @@ class AddRemoveModelApplication(npyscreen.NPSAppManaged): npyscreen.setTheme(npyscreen.Themes.DefaultTheme) self.main = self.addForm( "MAIN", - addRemoveModelsForm, + addModelsForm, name="Add/Remove Models", ) -# --------------------------------------------- -def yes_or_no(prompt: str, default_yes=True): - completer.set_options(["yes", "no"]) - completer.complete_extensions(None) # turn off path-completion mode - default = "y" if default_yes else "n" - response = input(f"{prompt} [{default}] ") or default - if default_yes: - return response[0] not in ("n", "N") - else: - return response[0] in ("y", "Y") - - # --------------------------------------------- def recommended_datasets() -> dict: datasets = dict() @@ -486,7 +531,7 @@ def select_and_download_models(opt: Namespace): if opt.default_only: models_to_download = default_dataset() else: - myapplication = AddRemoveModelApplication() + myapplication = AddModelApplication() myapplication.run() models_to_download = dict(map(lambda x: (x, True), myapplication.selected_models)) if myapplication.selected_models else None diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index f5da927b9a..c463bd8e8e 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py 
@@ -180,11 +180,9 @@ class FloatSlider(npyscreen.Slider): stri = stri.rjust(l) return stri - class FloatTitleSlider(npyscreen.TitleText): _entry_type = FloatSlider - class mergeModelsForm(npyscreen.FormMultiPageAction): interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"] diff --git a/pyproject.toml b/pyproject.toml index cfbcf0284e..2eae8e0493 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,7 +108,7 @@ dependencies = [ "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main" "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging "invokeai-ti" = "ldm.invoke.training.textual_inversion:main" -"invokeai-initial-models" = "ldm.invoke.config.initial_model_select:main" +"invokeai-model-install" = "ldm.invoke.config.model_install:main" [project.urls] "Homepage" = "https://invoke-ai.github.io/InvokeAI/" From f299f407636c1d1f7d15f32218c17a019da1c6dc Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 14 Feb 2023 16:32:54 -0500 Subject: [PATCH 05/36] convert existing model display to column format --- ldm/invoke/config/model_install.py | 193 +++++++++++++++++------------ ldm/invoke/config/widgets.py | 70 +++++++++++ ldm/invoke/merge_diffusers.py | 12 +- 3 files changed, 186 insertions(+), 89 deletions(-) create mode 100644 ldm/invoke/config/widgets.py diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index cb7b957e4c..84ba124658 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -7,6 +7,7 @@ # Coauthor: Kevin Turner http://github.com/keturn # import argparse +import curses import os import re import shutil @@ -22,6 +23,7 @@ import npyscreen import requests from diffusers import AutoencoderKL from huggingface_hub import hf_hub_url +from npyscreen import widget from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from tqdm import tqdm @@ -30,6 +32,7 @@ import invokeai.configs as configs from ldm.invoke.devices import choose_precision, choose_torch_device from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir +from ldm.invoke.config.widgets import MultiSelectColumns warnings.filterwarnings("ignore") import torch @@ -84,37 +87,52 @@ class addModelsForm(npyscreen.FormMultiPageAction): self.starter_model_list = [ x for x in list(self.initial_models.keys()) if x not in self.existing_models ] + self.installed_models=dict() super().__init__(parentApp, name) def create(self): - starter_model_labels = [ - "%-30s %-50s" % (x, self.initial_models[x].description) - for x in self.starter_model_list - ] + window_height, window_width = curses.initscr().getmaxyx() + starter_model_labels = self._get_starter_model_labels() recommended_models = [ x for x in self.starter_model_list if self.initial_models[x].get("recommended", False) ] - previously_installed_models = [ - x for x in list(self.initial_models.keys()) if x in self.existing_models - ] + previously_installed_models = sorted( + [ + x for x in list(self.initial_models.keys()) if x in self.existing_models + ] + ) + + if len(previously_installed_models) > 0: + title = self.add_widget_intelligent( + npyscreen.TitleText, + name="Currently installed starter models. 
Uncheck to delete:", + editable=False, + color="CONTROL", + ) + self.nextrely -= 1 + columns = 3 + self.previously_installed_models = self.add_widget_intelligent( + MultiSelectColumns, + columns=columns, + values=previously_installed_models, + value=[x for x in range(0,len(previously_installed_models))], + max_height=len(previously_installed_models)+1 // columns, + slow_scroll=True, + scroll_exit = True, + ) self.add_widget_intelligent( npyscreen.TitleText, - name="This is a starter set of Stable Diffusion models from HuggingFace", + name="Select from a starter set of Stable Diffusion models from HuggingFace:", editable=False, color="CONTROL", ) - self.add_widget_intelligent( - npyscreen.FixedText, - value="Select models to install:", - editable=False, - color="LABELBOLD", - ) + self.nextrely -= 2 self.add_widget_intelligent(npyscreen.FixedText, value="", editable=False), self.models_selected = self.add_widget_intelligent( npyscreen.MultiSelect, - name="Install/Remove Models", + name="Install Starter Models", values=starter_model_labels, value=[ self.starter_model_list.index(x) @@ -124,72 +142,80 @@ class addModelsForm(npyscreen.FormMultiPageAction): max_height=len(starter_model_labels) + 1, scroll_exit=True, ) - if len(previously_installed_models) > 0: - title = self.add_widget_intelligent( - npyscreen.TitleText, - name=f"These starter models are already installed. Use the command-line or Web UIs to manage them:", - editable=False, - color="CONTROL", - ) - y_origin = title.rely+1 - - # use three columns - col_cnt = 3 - col_width = max([len(x) for x in previously_installed_models])+2 - rows = ceil(len(previously_installed_models)/col_cnt) - previously_installed_models = sorted(previously_installed_models) - - for i in range(0,len(previously_installed_models)): - m = previously_installed_models[i] - row = i % rows - col = i // rows - self.add_widget_intelligent( - npyscreen.FixedText, - value=m, - editable=False, - relx=col_cnt+col*col_width, - rely=y_origin+row - ) - self.nextrely += rows - self.autoload_directory = self.add_widget_intelligent( - npyscreen.TitleFilename, - name='Import all .ckpt/.safetensors files from this directory ( to autocomplete):', - select_dir=True, - must_exist=True, - use_two_lines=False, - begin_entry_at=81, - value=os.path.expanduser('~'+'/'), - scroll_exit=True, - ) - self.autoload_onstartup = self.add_widget_intelligent( - npyscreen.Checkbox, - name='Scan this directory each time InvokeAI starts for new models to import.', - value=False, - scroll_exit=True, - ) - self.nextrely += 1 + for line in [ + 'Import checkpoint/safetensor models from the directory below.', + '(Use to autocomplete)' + ]: self.add_widget_intelligent( npyscreen.TitleText, - name='In the space below, you may cut and paste URLs, paths to .ckpt/.safetensor files, or HuggingFace diffusers repository names to import:', + name=line, editable=False, color="CONTROL", ) - self.model_names = self.add_widget_intelligent( - npyscreen.MultiLineEdit, - max_width=75, - max_height=16, - scroll_exit=True, - relx=18 - ) - self.autoload_onstartup = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name='Keep files in original format, or convert .ckpt/.safetensors into fast-loading diffusers models:', - values=['Original format','Convert to diffusers format'], - value=0, - scroll_exit=True, + self.nextrely -= 1 + self.autoload_directory = self.add_widget_intelligent( + npyscreen.TitleFilename, + name='Directory:', + select_dir=True, + must_exist=True, + use_two_lines=False, + 
value=os.path.expanduser('~'+'/'), + scroll_exit=True, + ) + self.autoload_onstartup = self.add_widget_intelligent( + npyscreen.Checkbox, + name='Scan this directory each time InvokeAI starts for new models to import.', + value=False, + scroll_exit=True, + ) + self.nextrely += 1 + for line in [ + 'In the space below, you may cut and paste URLs, paths to .ckpt/.safetensor files', + 'or HuggingFace diffusers repository names to import.', + '(Use control-V or shift-control-V to paste):' + ]: + self.add_widget_intelligent( + npyscreen.TitleText, + name=line, + editable=False, + color="CONTROL", ) + self.nextrely -= 1 + self.model_names = self.add_widget_intelligent( + npyscreen.MultiLineEdit, + max_width=75, + max_height=8, + scroll_exit=True, + relx=3 + ) + self.autoload_onstartup = self.add_widget_intelligent( + npyscreen.TitleSelectOne, + name='Keep files in original format, or convert .ckpt/.safetensors into fast-loading diffusers models:', + values=['Original format','Convert to diffusers format'], + value=0, + scroll_exit=True, + ) + self.find_next_editable() +# self.set_editing(self.models_selected) +# self.display() +# self.models_selected.editing=True +# self.models_selected.edit() - self.models_selected.editing = True + def _get_starter_model_labels(self): + window_height, window_width = curses.initscr().getmaxyx() + label_width = 25 + checkbox_width = 4 + spacing_width = 2 + description_width = window_width - label_width - checkbox_width - spacing_width + im = self.initial_models + names = list(im.keys()) + descriptions = [im[x].description [0:description_width-3]+'...' + if len(im[x].description) > description_width + else im[x].description + for x in im] + return [ + f"%-{label_width}s %s" % (names[x], descriptions[x]) for x in range(0,len(im)) + ] def on_ok(self): self.parentApp.setNextForm(None) @@ -607,12 +633,23 @@ def main(): try: select_and_download_models(opt) + except AssertionError as e: + print(str(e)) + sys.exit(-1) except KeyboardInterrupt: print("\nGoodbye! Come back soon.") - except Exception as e: - print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"') - print(traceback.format_exc()) - + except (widget.NotEnoughSpaceForWidget, Exception) as e: + if str(e).startswith("Height of 1 allocated"): + print( + "** Insufficient vertical space for the interface. Please make your window taller and try again" + ) + elif str(e).startswith('addwstr'): + print( + '** Insufficient horizontal space for the interface. Please make your window wider and try again.' 
+ ) + else: + print(f"** A layout error has occurred: {str(e)}") + sys.exit(-1) # ------------------------------------- if __name__ == "__main__": diff --git a/ldm/invoke/config/widgets.py b/ldm/invoke/config/widgets.py new file mode 100644 index 0000000000..59fc305ada --- /dev/null +++ b/ldm/invoke/config/widgets.py @@ -0,0 +1,70 @@ +''' +Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py +''' +import math +import npyscreen +import curses + +class FloatSlider(npyscreen.Slider): + # this is supposed to adjust display precision, but doesn't + def translate_value(self): + stri = "%3.2f / %3.2f" % (self.value, self.out_of) + l = (len(str(self.out_of))) * 2 + 4 + stri = stri.rjust(l) + return stri + +class FloatTitleSlider(npyscreen.TitleText): + _entry_type = FloatSlider + +class MultiSelectColumns(npyscreen.MultiSelect): + def __init__(self, screen, columns: int=1, values: list=[], **keywords): + self.columns = columns + self.value_cnt = len(values) + self.rows = math.ceil(self.value_cnt / self.columns) + super().__init__(screen,values=values, **keywords) + + def make_contained_widgets(self): + self._my_widgets = [] + column_width = self.width // self.columns + for h in range(self.value_cnt): + self._my_widgets.append( + self._contained_widgets(self.parent, + rely=self.rely + (h % self.rows) * self._contained_widget_height, + relx=self.relx + (h // self.rows) * column_width, + max_width=column_width, + max_height=self.__class__._contained_widget_height, + ) + ) + + def set_up_handlers(self): + super().set_up_handlers() + self.handlers.update({ + curses.KEY_UP: self.h_cursor_line_left, + curses.KEY_DOWN: self.h_cursor_line_right, + } + ) + def h_cursor_line_down(self, ch): + self.cursor_line += self.rows + if self.cursor_line >= len(self.values): + if self.scroll_exit: + self.cursor_line = len(self.values)-self.rows + self.h_exit_down(ch) + return True + else: + self.cursor_line -= self.rows + return True + + def h_cursor_line_up(self, ch): + self.cursor_line -= self.rows + if self.cursor_line < 0: + if self.scroll_exit: + self.cursor_line = 0 + self.h_exit_up(ch) + else: + self.cursor_line = 0 + + def h_cursor_line_left(self,ch): + super().h_cursor_line_up(ch) + + def h_cursor_line_right(self,ch): + super().h_cursor_line_down(ch) diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index c463bd8e8e..ea7dfb4294 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -20,6 +20,7 @@ from diffusers import logging as dlogging from npyscreen import widget from omegaconf import OmegaConf +from ldm.invoke.config.widgets import FloatTitleSlider from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, global_models_dir, global_set_root) from ldm.invoke.model_manager import ModelManager @@ -172,17 +173,6 @@ def _parse_args() -> Namespace: # ------------------------- GUI HERE ------------------------- -class FloatSlider(npyscreen.Slider): - # this is supposed to adjust display precision, but doesn't - def translate_value(self): - stri = "%3.2f / %3.2f" % (self.value, self.out_of) - l = (len(str(self.out_of))) * 2 + 4 - stri = stri.rjust(l) - return stri - -class FloatTitleSlider(npyscreen.TitleText): - _entry_type = FloatSlider - class mergeModelsForm(npyscreen.FormMultiPageAction): interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"] From e87a2fe14b0d4670b7c063f3689e139787ef3d72 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 15 Feb 2023 01:07:39 -0500 
Subject: [PATCH 06/36] model installer frontend done - needs to be hooked to backend --- ldm/invoke/config/model_install.py | 585 +++++++----------------- ldm/invoke/config/model_install_util.py | 378 +++++++++++++++ ldm/invoke/config/widgets.py | 27 ++ 3 files changed, 570 insertions(+), 420 deletions(-) create mode 100644 ldm/invoke/config/model_install_util.py diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 84ba124658..415c99f8f8 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -3,79 +3,27 @@ # Before running stable-diffusion on an internet-isolated machine, # run this script from one with internet connectivity. The # two machines must share a common .cache directory. -# -# Coauthor: Kevin Turner http://github.com/keturn -# + import argparse import curses import os -import re -import shutil import sys import traceback -import warnings from argparse import Namespace -from math import ceil -from pathlib import Path -from tempfile import TemporaryFile +from typing import List import npyscreen -import requests -from diffusers import AutoencoderKL -from huggingface_hub import hf_hub_url +import torch from npyscreen import widget from omegaconf import OmegaConf -from omegaconf.dictconfig import DictConfig -from tqdm import tqdm - -import invokeai.configs as configs -from ldm.invoke.devices import choose_precision, choose_torch_device -from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline -from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir -from ldm.invoke.config.widgets import MultiSelectColumns - -warnings.filterwarnings("ignore") -import torch - -# --------------------------globals----------------------- -Model_dir = "models" -Weights_dir = "ldm/stable-diffusion-v1/" - -# the initial "configs" dir is now bundled in the `invokeai.configs` package -Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml" - -Default_config_file = Path(global_config_dir()) / "models.yaml" -SD_Configs = Path(global_config_dir()) / "stable-diffusion" - -Datasets = OmegaConf.load(Dataset_path) - -Config_preamble = """# This file describes the alternative machine learning models -# available to InvokeAI script. -# -# To add a new model, follow the examples below. Each -# model requires a model config file, a weights file, -# and the width and height of the images it -# was trained on. 
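# Illustrative sketch (not part of the patch): the shape of the models.yaml
# stanza that new_config_file_contents() below generates for a checkpoint
# model; the names and paths here are examples, not taken from this patch.
stanza = {
    "description": "Stable Diffusion version 1.5",
    "repo_id": "runwayml/stable-diffusion-v1-5",
    "format": "ckpt",
    "width": 512,
    "height": 512,
    "weights": "models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt",
    "config": "configs/stable-diffusion/v1-inference.yaml",
    "default": True,
}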
-""" - -# ------------------------------------- -def yes_or_no(prompt: str, default_yes=True): - default = "y" if default_yes else "n" - response = input(f"{prompt} [{default}] ") or default - if default_yes: - return response[0] not in ("n", "N") - else: - return response[0] in ("y", "Y") - -# ------------------------------------- -def get_root(root: str = None) -> str: - if root: - return root - elif os.environ.get("INVOKEAI_ROOT"): - return os.environ.get("INVOKEAI_ROOT") - else: - return Globals.root +from ..devices import choose_precision, choose_torch_device +from ..globals import Globals +from .widgets import MultiSelectColumns, TextBox +from .model_install_util import (Dataset_path, Default_config_file, + default_dataset, download_weight_datasets, + update_config_file, get_root + ) class addModelsForm(npyscreen.FormMultiPageAction): def __init__(self, parentApp, name): @@ -98,38 +46,53 @@ class addModelsForm(npyscreen.FormMultiPageAction): for x in self.starter_model_list if self.initial_models[x].get("recommended", False) ] - previously_installed_models = sorted( + self.installed_models = sorted( [ x for x in list(self.initial_models.keys()) if x in self.existing_models ] ) - if len(previously_installed_models) > 0: - title = self.add_widget_intelligent( - npyscreen.TitleText, - name="Currently installed starter models. Uncheck to delete:", + self.add_widget_intelligent( + npyscreen.FixedText, + value='Use ctrl-N and ctrl-P to move to the ext and
revious fields,', + editable=False, + ) + self.add_widget_intelligent( + npyscreen.FixedText, + value='cursor arrows to make a selection, and space to toggle checkboxes.', + editable=False, + ) + + if len(self.installed_models) > 0: + self.add_widget_intelligent( + npyscreen.TitleFixedText, + name="== INSTALLED STARTER MODELS ==", + value="Currently installed starter models. Uncheck to delete:", + begin_entry_at=2, editable=False, color="CONTROL", ) - self.nextrely -= 1 - columns = 3 + columns = self._get_columns() self.previously_installed_models = self.add_widget_intelligent( MultiSelectColumns, columns=columns, - values=previously_installed_models, - value=[x for x in range(0,len(previously_installed_models))], - max_height=len(previously_installed_models)+1 // columns, + values=self.installed_models, + value=[x for x in range(0,len(self.installed_models))], + max_height=2+len(self.installed_models) // columns, + relx = 4, slow_scroll=True, scroll_exit = True, ) + self.add_widget_intelligent( - npyscreen.TitleText, - name="Select from a starter set of Stable Diffusion models from HuggingFace:", + npyscreen.TitleFixedText, + name="== UNINSTALLED STARTER MODELS ==", + value="Select from a starter set of Stable Diffusion models from HuggingFace:", + begin_entry_at=2, editable=False, color="CONTROL", ) - self.nextrely -= 2 - self.add_widget_intelligent(npyscreen.FixedText, value="", editable=False), + self.nextrely -= 1 self.models_selected = self.add_widget_intelligent( npyscreen.MultiSelect, name="Install Starter Models", @@ -140,39 +103,39 @@ class addModelsForm(npyscreen.FormMultiPageAction): if x in recommended_models ], max_height=len(starter_model_labels) + 1, + relx = 4, scroll_exit=True, ) - for line in [ - 'Import checkpoint/safetensor models from the directory below.', - '(Use to autocomplete)' - ]: - self.add_widget_intelligent( - npyscreen.TitleText, - name=line, - editable=False, - color="CONTROL", - ) - self.nextrely -= 1 + self.add_widget_intelligent( + npyscreen.TitleFixedText, + name='== MODEL IMPORT DIRECTORY ==', + value='Import all models found in this directory ( autocompletes):', + begin_entry_at=2, + editable=False, + color="CONTROL", + ) self.autoload_directory = self.add_widget_intelligent( npyscreen.TitleFilename, name='Directory:', select_dir=True, must_exist=True, use_two_lines=False, - value=os.path.expanduser('~'+'/'), + relx = 4, + labelColor='DANGER', scroll_exit=True, ) - self.autoload_onstartup = self.add_widget_intelligent( + self.autoscan_on_startup = self.add_widget_intelligent( npyscreen.Checkbox, name='Scan this directory each time InvokeAI starts for new models to import.', value=False, + relx = 4, scroll_exit=True, ) self.nextrely += 1 for line in [ - 'In the space below, you may cut and paste URLs, paths to .ckpt/.safetensor files', - 'or HuggingFace diffusers repository names to import.', - '(Use control-V or shift-control-V to paste):' + '== INDIVIDUAL MODELS TO IMPORT ==', + 'Enter list of URLs, paths models or HuggingFace diffusers repository IDs.', + 'Use control-V or shift-control-V to paste:' ]: self.add_widget_intelligent( npyscreen.TitleText, @@ -181,27 +144,33 @@ class addModelsForm(npyscreen.FormMultiPageAction): color="CONTROL", ) self.nextrely -= 1 - self.model_names = self.add_widget_intelligent( - npyscreen.MultiLineEdit, - max_width=75, + self.import_model_paths = self.add_widget_intelligent( + TextBox, max_height=8, scroll_exit=True, - relx=3 + editable=True, + relx=4 ) - self.autoload_onstartup = self.add_widget_intelligent( + 
self.nextrely += 2 + self.convert_models = self.add_widget_intelligent( npyscreen.TitleSelectOne, - name='Keep files in original format, or convert .ckpt/.safetensors into fast-loading diffusers models:', - values=['Original format','Convert to diffusers format'], + name='== CONVERT IMPORTED MODELS INTO DIFFUSERS==', + values=['Keep original format','Convert to diffusers'], value=0, + begin_entry_at=4, scroll_exit=True, ) - self.find_next_editable() -# self.set_editing(self.models_selected) -# self.display() -# self.models_selected.editing=True -# self.models_selected.edit() + + def resize(self): + super().resize() + self.models_selected.values = self._get_starter_model_labels() + # thought this would dynamically resize the widget, but no luck + # self.previously_installed_models.columns = self._get_columns() + # self.previously_installed_models.max_height = 2+len(self.installed_models) // self._get_columns() + # self.previously_installed_models.make_contained_widgets() + # self.previously_installed_models.display() - def _get_starter_model_labels(self): + def _get_starter_model_labels(self)->List[str]: window_height, window_width = curses.initscr().getmaxyx() label_width = 25 checkbox_width = 4 @@ -217,17 +186,89 @@ class addModelsForm(npyscreen.FormMultiPageAction): f"%-{label_width}s %s" % (names[x], descriptions[x]) for x in range(0,len(im)) ] + def _get_columns(self)->int: + window_height, window_width = curses.initscr().getmaxyx() + return 4 if window_width > 240 else 3 if window_width>160 else 2 if window_width>80 else 1 + def on_ok(self): - self.parentApp.setNextForm(None) + self.parentApp.setNextForm('MONITOR_OUTPUT') self.editing = False - self.parentApp.selected_models = [ - self.starter_model_list[x] for x in self.models_selected.value - ] - npyscreen.notify(f"Installing selected {self.parentApp.selected_models}") + self.parentApp.user_cancelled = False + self.marshall_arguments() def on_cancel(self): self.parentApp.setNextForm(None) - self.parentApp.selected_models = None + self.ParentApp.user_cancelled = True + self.editing = False + + def marshall_arguments(self): + ''' + Assemble arguments and store as attributes of the application: + .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml + True => Install + False => Remove + .scan_directory: Path to a directory of models to scan and import + .autoscan_on_startup: True if invokeai should scan and import at startup time + .import_model_paths: list of URLs, repo_ids and file paths to import + .convert_to_diffusers: if True, convert legacy checkpoints into diffusers + ''' + # starter models to install/remove + model_names = list(self.initial_models.keys()) + starter_models = dict(map(lambda x: (model_names[x], True), self.models_selected.value)) + if hasattr(self,'previously_installed_models'): + unchecked = [ + self.previously_installed_models.values[x] + for x in range(0,len(self.previously_installed_models.values)) + if x not in self.previously_installed_models.value + ] + starter_models.update( + map(lambda x: (x, False), unchecked) + ) + self.parentApp.starter_models=starter_models + + # load directory and whether to scan on startup + self.parentApp.scan_directory = self.autoload_directory.value + self.parentApp.autoscan_on_startup = self.autoscan_on_startup.value + + # URLs and the like + self.parentApp.import_model_paths = self.import_model_paths.value.split() + self.parentApp.convert_to_diffusers = self.convert_models.value != 0 + +class Log(object): + def __init__(self, writable): + self.writable 
= writable + + def __enter__(self): + self._stdout = sys.stdout + sys.stdout = self.writable + return self + def __exit__(self, *args): + sys.stdout = self._stdout + +class outputForm(npyscreen.ActionForm): + def create(self): + self.buffer = self.add_widget( + npyscreen.BufferPager, + editable=False, + ) + + def write(self,string): + if string != '\n': + self.buffer.buffer([string]) + + def beforeEditing(self): + myapplication = self.parentApp + with Log(self): + print(f'DEBUG: these models will be removed: {[x for x in myapplication.starter_models if not myapplication.starter_models[x]]}') + print(f'DEBUG: these models will be installed: {[x for x in myapplication.starter_models if myapplication.starter_models[x]]}') + print(f'DEBUG: this directory will be scanned: {myapplication.scan_directory}') + print(f'DEBUG: scan at startup time? {myapplication.autoscan_on_startup}') + print(f'DEBUG: these things will be downloaded: {myapplication.import_model_paths}') + print(f'DEBUG: convert to diffusers? {myapplication.convert_to_diffusers}') + + def on_ok(self): + self.buffer.buffer(['goodbye!']) + self.parentApp.setNextForm(None) self.editing = False class AddModelApplication(npyscreen.NPSAppManaged): @@ -242,316 +283,12 @@ class AddModelApplication(npyscreen.NPSAppManaged): addModelsForm, name="Add/Remove Models", ) - - -# --------------------------------------------- -def recommended_datasets() -> dict: - datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("recommended", False): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -def default_dataset() -> dict: - datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("default", False): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -def all_datasets() -> dict: - datasets = dict() - for ds in Datasets.keys(): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -# look for legacy model.ckpt in models directory and offer to -# normalize its name -def migrate_models_ckpt(): - model_path = os.path.join(Globals.root, Model_dir, Weights_dir) - if not os.path.exists(os.path.join(model_path, "model.ckpt")): - return - new_name = Datasets["stable-diffusion-1.4"]["file"] - print('You seem to have the Stable Diffusion v4.1 "model.ckpt" already installed.') - rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?') - if rename: - print(f"model.ckpt => {new_name}") - os.replace( - os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) + self.output = self.addForm( + 'MONITOR_OUTPUT', + outputForm, + name='Model Install Output' ) - -# --------------------------------------------- -def download_weight_datasets( - models: dict, access_token: str, precision: str = "float32" -): - migrate_models_ckpt() - successful = dict() - for mod in models.keys(): - print(f"Downloading {mod}:") - successful[mod] = _download_repo_or_file( - Datasets[mod], access_token, precision=precision - ) - return successful - - -def _download_repo_or_file( - mconfig: DictConfig, access_token: str, precision: str = "float32" -) -> Path: - path = None - if mconfig["format"] == "ckpt": - path = _download_ckpt_weights(mconfig, access_token) - else: - path = _download_diffusion_weights(mconfig, access_token, precision=precision) - if "vae" in mconfig and "repo_id" in mconfig["vae"]: - _download_diffusion_weights( - mconfig["vae"], access_token, precision=precision - ) - return path - - -def 
_download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path: - repo_id = mconfig["repo_id"] - filename = mconfig["file"] - cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir) - return hf_download_with_resume( - repo_id=repo_id, - model_dir=cache_dir, - model_name=filename, - access_token=access_token, - ) - - -# --------------------------------------------- -def download_from_hf( - model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs -): - print("", file=sys.stderr) # to prevent tqdm from overwriting - path = global_cache_dir(cache_subdir) - model = model_class.from_pretrained( - model_name, - cache_dir=path, - resume_download=True, - **kwargs, - ) - model_name = "--".join(("models", *model_name.split("/"))) - return path / model_name if model else None - - -def _download_diffusion_weights( - mconfig: DictConfig, access_token: str, precision: str = "float32" -): - repo_id = mconfig["repo_id"] - model_class = ( - StableDiffusionGeneratorPipeline - if mconfig.get("format", None) == "diffusers" - else AutoencoderKL - ) - extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}] - path = None - for extra_args in extra_arg_list: - try: - path = download_from_hf( - model_class, - repo_id, - cache_subdir="diffusers", - safety_checker=None, - **extra_args, - ) - except OSError as e: - if str(e).startswith("fp16 is not a valid"): - pass - else: - print(f"An unexpected error occurred while downloading the model: {e})") - if path: - break - return path - - -# --------------------------------------------- -def hf_download_with_resume( - repo_id: str, model_dir: str, model_name: str, access_token: str = None -) -> Path: - model_dest = Path(os.path.join(model_dir, model_name)) - os.makedirs(model_dir, exist_ok=True) - - url = hf_hub_url(repo_id, model_name) - - header = {"Authorization": f"Bearer {access_token}"} if access_token else {} - open_mode = "wb" - exist_size = 0 - - if os.path.exists(model_dest): - exist_size = os.path.getsize(model_dest) - header["Range"] = f"bytes={exist_size}-" - open_mode = "ab" - - resp = requests.get(url, headers=header, stream=True) - total = int(resp.headers.get("content-length", 0)) - - if ( - resp.status_code == 416 - ): # "range not satisfiable", which means nothing to return - print(f"* {model_name}: complete file found. Skipping.") - return model_dest - elif resp.status_code != 200: - print(f"** An error occurred during downloading {model_name}: {resp.reason}") - elif exist_size > 0: - print(f"* {model_name}: partial file found. Resuming...") - else: - print(f"* {model_name}: Downloading...") - - try: - if total < 2000: - print(f"*** ERROR DOWNLOADING {model_name}: {resp.text}") - return None - - with open(model_dest, open_mode) as file, tqdm( - desc=model_name, - initial=exist_size, - total=total + exist_size, - unit="iB", - unit_scale=True, - unit_divisor=1000, - ) as bar: - for data in resp.iter_content(chunk_size=1024): - size = file.write(data) - bar.update(size) - except Exception as e: - print(f"An error occurred while downloading {model_name}: {str(e)}") - return None - return model_dest - - -# --------------------------------------------- -def update_config_file(successfully_downloaded: dict, opt: dict): - config_file = ( - Path(opt.config_file) if opt.config_file is not None else Default_config_file - ) - - # In some cases (incomplete setup, etc), the default configs directory might be missing. - # Create it if it doesn't exist. 
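# Illustrative sketch (not part of the patch): typical use of the
# hf_download_with_resume() helper above. When a partial file already
# exists on disk, the helper sends a ranged request ("Range: bytes=N-")
# so the download resumes rather than restarts. The repo_id and filename
# below are examples only.
dest = hf_download_with_resume(
    repo_id="stabilityai/sd-vae-ft-mse-original",
    model_dir="models/ldm/stable-diffusion-v1",
    model_name="vae-ft-mse-840000-ema-pruned.ckpt",
    access_token=None,
)
if dest is None:
    print("download failed or was interrupted")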
- # this check is ignored if opt.config_file is specified - user is assumed to know what they - # are doing if they are passing a custom config file from elsewhere. - if config_file is Default_config_file and not config_file.parent.exists(): - configs_src = Dataset_path.parent - configs_dest = Default_config_file.parent - shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) - - yaml = new_config_file_contents(successfully_downloaded, config_file, opt) - - try: - backup = None - if os.path.exists(config_file): - print( - f"** {config_file.name} exists. Renaming to {config_file.stem}.yaml.orig" - ) - backup = config_file.with_suffix(".yaml.orig") - ## Ugh. Windows is unable to overwrite an existing backup file, raises a WinError 183 - if sys.platform == "win32" and backup.is_file(): - backup.unlink() - config_file.rename(backup) - - with TemporaryFile() as tmp: - tmp.write(Config_preamble.encode()) - tmp.write(yaml.encode()) - - with open(str(config_file.expanduser().resolve()), "wb") as new_config: - tmp.seek(0) - new_config.write(tmp.read()) - - except Exception as e: - print(f"**Error creating config file {config_file}: {str(e)} **") - if backup is not None: - print("restoring previous config file") - ## workaround, for WinError 183, see above - if sys.platform == "win32" and config_file.is_file(): - config_file.unlink() - backup.rename(config_file) - return - - print(f"Successfully created new configuration file {config_file}") - - -# --------------------------------------------- -def new_config_file_contents( - successfully_downloaded: dict, config_file: Path, opt: dict -) -> str: - if config_file.exists(): - conf = OmegaConf.load(str(config_file.expanduser().resolve())) - else: - conf = OmegaConf.create() - - default_selected = None - for model in successfully_downloaded: - # a bit hacky - what we are doing here is seeing whether a checkpoint - # version of the model was previously defined, and whether the current - # model is a diffusers (indicated with a path) - if conf.get(model) and Path(successfully_downloaded[model]).is_dir(): - offer_to_delete_weights(model, conf[model], opt.yes_to_all) - - stanza = {} - mod = Datasets[model] - stanza["description"] = mod["description"] - stanza["repo_id"] = mod["repo_id"] - stanza["format"] = mod["format"] - # diffusers don't need width and height (probably .ckpt doesn't either) - # so we no longer require these in INITIAL_MODELS.yaml - if "width" in mod: - stanza["width"] = mod["width"] - if "height" in mod: - stanza["height"] = mod["height"] - if "file" in mod: - stanza["weights"] = os.path.relpath( - successfully_downloaded[model], start=Globals.root - ) - stanza["config"] = os.path.normpath(os.path.join(SD_Configs, mod["config"])) - if "vae" in mod: - if "file" in mod["vae"]: - stanza["vae"] = os.path.normpath( - os.path.join(Model_dir, Weights_dir, mod["vae"]["file"]) - ) - else: - stanza["vae"] = mod["vae"] - if mod.get("default", False): - stanza["default"] = True - default_selected = True - - conf[model] = stanza - - # if no default model was chosen, then we select the first - # one in the list - if not default_selected: - conf[list(successfully_downloaded.keys())[0]]["default"] = True - - return OmegaConf.to_yaml(conf) - - -# --------------------------------------------- -def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool): - if not (weights := conf_stanza.get("weights")): - return - if re.match("/VAE/", conf_stanza.get("config")): - return - if yes_to_all or yes_or_no( - f"\n** The checkpoint 
version of {model_name} is superseded by the diffusers version. Delete the original file {weights}?", - default_yes=False, - ): - weights = Path(weights) - if not weights.is_absolute(): - weights = Path(Globals.root) / weights - try: - weights.unlink() - except OSError as e: - print(str(e)) - - # -------------------------------------------------------- def select_and_download_models(opt: Namespace): if opt.default_only: @@ -559,7 +296,14 @@ def select_and_download_models(opt: Namespace): else: myapplication = AddModelApplication() myapplication.run() - models_to_download = dict(map(lambda x: (x, True), myapplication.selected_models)) if myapplication.selected_models else None + if not myapplication.user_cancelled: + print(f'DEBUG: these models will be removed: {[x for x in myapplication.starter_models if not myapplication.starter_models[x]]}') + print(f'DEBUG: these models will be installed: {[x for x in myapplication.starter_models if myapplication.starter_models[x]]}') + print(f'DEBUG: this directory will be scanned: {myapplication.scan_directory}') + print(f'DEBUG: scan at startup time? {myapplication.autoscan_on_startup}') + print(f'DEBUG: these things will be downloaded: {myapplication.import_model_paths}') + print(f'DEBUG: convert to diffusers? {myapplication.convert_to_diffusers}') + sys.exit(0) if not models_to_download: print( @@ -649,6 +393,7 @@ def main(): ) else: print(f"** A layout error has occurred: {str(e)}") + traceback.print_exc() sys.exit(-1) # ------------------------------------- diff --git a/ldm/invoke/config/model_install_util.py b/ldm/invoke/config/model_install_util.py new file mode 100644 index 0000000000..1182815525 --- /dev/null +++ b/ldm/invoke/config/model_install_util.py @@ -0,0 +1,378 @@ +''' +Utility (backend) functions used by model_install.py +''' +import argparse +import os +import re +import shutil +import sys +import traceback +import warnings +from argparse import Namespace +from math import ceil +from pathlib import Path +from tempfile import TemporaryFile + +import npyscreen +import requests +from diffusers import AutoencoderKL +from huggingface_hub import hf_hub_url +from omegaconf import OmegaConf +from omegaconf.dictconfig import DictConfig +from tqdm import tqdm + +import invokeai.configs as configs +from ldm.invoke.devices import choose_precision, choose_torch_device +from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir +from ldm.invoke.config.widgets import MultiSelectColumns + +warnings.filterwarnings("ignore") +import torch + +# --------------------------globals----------------------- +Model_dir = "models" +Weights_dir = "ldm/stable-diffusion-v1/" + +# the initial "configs" dir is now bundled in the `invokeai.configs` package +Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml" + +Default_config_file = Path(global_config_dir()) / "models.yaml" +SD_Configs = Path(global_config_dir()) / "stable-diffusion" + +Datasets = OmegaConf.load(Dataset_path) + +Config_preamble = """# This file describes the alternative machine learning models +# available to InvokeAI script. +# +# To add a new model, follow the examples below. Each +# model requires a model config file, a weights file, +# and the width and height of the images it +# was trained on. 
+""" + +# ------------------------------------- +def yes_or_no(prompt: str, default_yes=True): + default = "y" if default_yes else "n" + response = input(f"{prompt} [{default}] ") or default + if default_yes: + return response[0] not in ("n", "N") + else: + return response[0] in ("y", "Y") + +# ------------------------------------- +def get_root(root: str = None) -> str: + if root: + return root + elif os.environ.get("INVOKEAI_ROOT"): + return os.environ.get("INVOKEAI_ROOT") + else: + return Globals.root + +# --------------------------------------------- +def recommended_datasets() -> dict: + datasets = dict() + for ds in Datasets.keys(): + if Datasets[ds].get("recommended", False): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +def default_dataset() -> dict: + datasets = dict() + for ds in Datasets.keys(): + if Datasets[ds].get("default", False): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +def all_datasets() -> dict: + datasets = dict() + for ds in Datasets.keys(): + datasets[ds] = True + return datasets + + +# --------------------------------------------- +# look for legacy model.ckpt in models directory and offer to +# normalize its name +def migrate_models_ckpt(): + model_path = os.path.join(Globals.root, Model_dir, Weights_dir) + if not os.path.exists(os.path.join(model_path, "model.ckpt")): + return + new_name = Datasets["stable-diffusion-1.4"]["file"] + print('You seem to have the Stable Diffusion v4.1 "model.ckpt" already installed.') + rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?') + if rename: + print(f"model.ckpt => {new_name}") + os.replace( + os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) + ) + + +# --------------------------------------------- +def download_weight_datasets( + models: dict, access_token: str, precision: str = "float32" +): + migrate_models_ckpt() + successful = dict() + for mod in models.keys(): + print(f"Downloading {mod}:") + successful[mod] = _download_repo_or_file( + Datasets[mod], access_token, precision=precision + ) + return successful + + +def _download_repo_or_file( + mconfig: DictConfig, access_token: str, precision: str = "float32" +) -> Path: + path = None + if mconfig["format"] == "ckpt": + path = _download_ckpt_weights(mconfig, access_token) + else: + path = _download_diffusion_weights(mconfig, access_token, precision=precision) + if "vae" in mconfig and "repo_id" in mconfig["vae"]: + _download_diffusion_weights( + mconfig["vae"], access_token, precision=precision + ) + return path + + +def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path: + repo_id = mconfig["repo_id"] + filename = mconfig["file"] + cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir) + return hf_download_with_resume( + repo_id=repo_id, + model_dir=cache_dir, + model_name=filename, + access_token=access_token, + ) + + +# --------------------------------------------- +def download_from_hf( + model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs +): + print("", file=sys.stderr) # to prevent tqdm from overwriting + path = global_cache_dir(cache_subdir) + model = model_class.from_pretrained( + model_name, + cache_dir=path, + resume_download=True, + **kwargs, + ) + model_name = "--".join(("models", *model_name.split("/"))) + return path / model_name if model else None + + +def _download_diffusion_weights( + mconfig: DictConfig, access_token: str, precision: str = "float32" 
+): + repo_id = mconfig["repo_id"] + model_class = ( + StableDiffusionGeneratorPipeline + if mconfig.get("format", None) == "diffusers" + else AutoencoderKL + ) + extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}] + path = None + for extra_args in extra_arg_list: + try: + path = download_from_hf( + model_class, + repo_id, + cache_subdir="diffusers", + safety_checker=None, + **extra_args, + ) + except OSError as e: + if str(e).startswith("fp16 is not a valid"): + pass + else: + print(f"An unexpected error occurred while downloading the model: {e})") + if path: + break + return path + + +# --------------------------------------------- +def hf_download_with_resume( + repo_id: str, model_dir: str, model_name: str, access_token: str = None +) -> Path: + model_dest = Path(os.path.join(model_dir, model_name)) + os.makedirs(model_dir, exist_ok=True) + + url = hf_hub_url(repo_id, model_name) + + header = {"Authorization": f"Bearer {access_token}"} if access_token else {} + open_mode = "wb" + exist_size = 0 + + if os.path.exists(model_dest): + exist_size = os.path.getsize(model_dest) + header["Range"] = f"bytes={exist_size}-" + open_mode = "ab" + + resp = requests.get(url, headers=header, stream=True) + total = int(resp.headers.get("content-length", 0)) + + if ( + resp.status_code == 416 + ): # "range not satisfiable", which means nothing to return + print(f"* {model_name}: complete file found. Skipping.") + return model_dest + elif resp.status_code != 200: + print(f"** An error occurred during downloading {model_name}: {resp.reason}") + elif exist_size > 0: + print(f"* {model_name}: partial file found. Resuming...") + else: + print(f"* {model_name}: Downloading...") + + try: + if total < 2000: + print(f"*** ERROR DOWNLOADING {model_name}: {resp.text}") + return None + + with open(model_dest, open_mode) as file, tqdm( + desc=model_name, + initial=exist_size, + total=total + exist_size, + unit="iB", + unit_scale=True, + unit_divisor=1000, + ) as bar: + for data in resp.iter_content(chunk_size=1024): + size = file.write(data) + bar.update(size) + except Exception as e: + print(f"An error occurred while downloading {model_name}: {str(e)}") + return None + return model_dest + + +# --------------------------------------------- +def update_config_file(successfully_downloaded: dict, opt: dict): + config_file = ( + Path(opt.config_file) if opt.config_file is not None else Default_config_file + ) + + # In some cases (incomplete setup, etc), the default configs directory might be missing. + # Create it if it doesn't exist. + # this check is ignored if opt.config_file is specified - user is assumed to know what they + # are doing if they are passing a custom config file from elsewhere. + if config_file is Default_config_file and not config_file.parent.exists(): + configs_src = Dataset_path.parent + configs_dest = Default_config_file.parent + shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) + + yaml = new_config_file_contents(successfully_downloaded, config_file, opt) + + try: + backup = None + if os.path.exists(config_file): + print( + f"** {config_file.name} exists. Renaming to {config_file.stem}.yaml.orig" + ) + backup = config_file.with_suffix(".yaml.orig") + ## Ugh. 
Windows is unable to overwrite an existing backup file, raises a WinError 183 + if sys.platform == "win32" and backup.is_file(): + backup.unlink() + config_file.rename(backup) + + with TemporaryFile() as tmp: + tmp.write(Config_preamble.encode()) + tmp.write(yaml.encode()) + + with open(str(config_file.expanduser().resolve()), "wb") as new_config: + tmp.seek(0) + new_config.write(tmp.read()) + + except Exception as e: + print(f"**Error creating config file {config_file}: {str(e)} **") + if backup is not None: + print("restoring previous config file") + ## workaround, for WinError 183, see above + if sys.platform == "win32" and config_file.is_file(): + config_file.unlink() + backup.rename(config_file) + return + + print(f"Successfully created new configuration file {config_file}") + + +# --------------------------------------------- +def new_config_file_contents( + successfully_downloaded: dict, config_file: Path, opt: dict +) -> str: + if config_file.exists(): + conf = OmegaConf.load(str(config_file.expanduser().resolve())) + else: + conf = OmegaConf.create() + + default_selected = None + for model in successfully_downloaded: + # a bit hacky - what we are doing here is seeing whether a checkpoint + # version of the model was previously defined, and whether the current + # model is a diffusers (indicated with a path) + if conf.get(model) and Path(successfully_downloaded[model]).is_dir(): + offer_to_delete_weights(model, conf[model], opt.yes_to_all) + + stanza = {} + mod = Datasets[model] + stanza["description"] = mod["description"] + stanza["repo_id"] = mod["repo_id"] + stanza["format"] = mod["format"] + # diffusers don't need width and height (probably .ckpt doesn't either) + # so we no longer require these in INITIAL_MODELS.yaml + if "width" in mod: + stanza["width"] = mod["width"] + if "height" in mod: + stanza["height"] = mod["height"] + if "file" in mod: + stanza["weights"] = os.path.relpath( + successfully_downloaded[model], start=Globals.root + ) + stanza["config"] = os.path.normpath(os.path.join(SD_Configs, mod["config"])) + if "vae" in mod: + if "file" in mod["vae"]: + stanza["vae"] = os.path.normpath( + os.path.join(Model_dir, Weights_dir, mod["vae"]["file"]) + ) + else: + stanza["vae"] = mod["vae"] + if mod.get("default", False): + stanza["default"] = True + default_selected = True + + conf[model] = stanza + + # if no default model was chosen, then we select the first + # one in the list + if not default_selected: + conf[list(successfully_downloaded.keys())[0]]["default"] = True + + return OmegaConf.to_yaml(conf) + + +# --------------------------------------------- +def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool): + if not (weights := conf_stanza.get("weights")): + return + if re.match("/VAE/", conf_stanza.get("config")): + return + if yes_to_all or yes_or_no( + f"\n** The checkpoint version of {model_name} is superseded by the diffusers version. 
Delete the original file {weights}?", + default_yes=False, + ): + weights = Path(weights) + if not weights.is_absolute(): + weights = Path(Globals.root) / weights + try: + weights.unlink() + except OSError as e: + print(str(e)) + diff --git a/ldm/invoke/config/widgets.py b/ldm/invoke/config/widgets.py index 59fc305ada..b090f83e72 100644 --- a/ldm/invoke/config/widgets.py +++ b/ldm/invoke/config/widgets.py @@ -68,3 +68,30 @@ class MultiSelectColumns(npyscreen.MultiSelect): def h_cursor_line_right(self,ch): super().h_cursor_line_down(ch) + +class TextBox(npyscreen.MultiLineEdit): + def update(self, clear=True): + if clear: self.clear() + + HEIGHT = self.height + WIDTH = self.width + # draw box. + self.parent.curses_pad.hline(self.rely, self.relx, curses.ACS_HLINE, WIDTH) + self.parent.curses_pad.hline(self.rely + HEIGHT, self.relx, curses.ACS_HLINE, WIDTH) + self.parent.curses_pad.vline(self.rely, self.relx, curses.ACS_VLINE, self.height) + self.parent.curses_pad.vline(self.rely, self.relx+WIDTH, curses.ACS_VLINE, HEIGHT) + + # draw corners + self.parent.curses_pad.addch(self.rely, self.relx, curses.ACS_ULCORNER, ) + self.parent.curses_pad.addch(self.rely, self.relx+WIDTH, curses.ACS_URCORNER, ) + self.parent.curses_pad.addch(self.rely+HEIGHT, self.relx, curses.ACS_LLCORNER, ) + self.parent.curses_pad.addch(self.rely+HEIGHT, self.relx+WIDTH, curses.ACS_LRCORNER, ) + + # fool our superclass into thinking drawing area is smaller - this is really hacky but it seems to work + (relx,rely,height,width) = (self.relx, self.rely, self.height, self.width) + self.relx += 1 + self.rely += 1 + self.height -= 1 + self.width -= 1 + super().update(clear=False) + (self.relx,self.rely,self.height,self.width) = (relx, rely, height, width) From 1bb07795d8629b89a0e5f8b22ed129161746e641 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 00:34:15 -0500 Subject: [PATCH 07/36] model installer downloads starter models + user-provided paths and repo_ids - Ability to scan directory not yet implemented - Can't download from Civitai due to incomplete URL download implementation --- ldm/invoke/config/model_install.py | 295 ++++++++++-------- ...stall_util.py => model_install_backend.py} | 151 ++++++--- ldm/invoke/model_manager.py | 75 ++++- 3 files changed, 341 insertions(+), 180 deletions(-) rename ldm/invoke/config/{model_install_util.py => model_install_backend.py} (72%) diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 415c99f8f8..77de6cee21 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -4,6 +4,14 @@ # run this script from one with internet connectivity. The # two machines must share a common .cache directory. +''' +This is the npyscreen frontend to the model installation application. +The work is actually done in a backend file named model_install_backend.py, +and is kicked off in the beforeEditing() method in a form with +the class name "outputForm". This decision allows the output from the +installation process to be captured and displayed in an attractive form. 
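# Illustrative sketch (not part of the patch) of the hand-off described
# above: the form only collects the user's choices, and the backend's
# install_requested_models() does the actual work, mirroring
# process_and_execute() later in this commit. Argument values are examples.
install_requested_models(
    install_initial_models=["stable-diffusion-1.5"],
    remove_models=[],
    scan_directory=None,
    external_models=["runwayml/stable-diffusion-v1-5"],
    scan_at_startup=False,
    convert_to_diffusers=False,
    precision="float32",
    purge_deleted=False,
    config_file_path=None,
)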
+''' + import argparse import curses import os @@ -14,22 +22,23 @@ from typing import List import npyscreen import torch +from pathlib import Path from npyscreen import widget from omegaconf import OmegaConf from ..devices import choose_precision, choose_torch_device from ..globals import Globals from .widgets import MultiSelectColumns, TextBox -from .model_install_util import (Dataset_path, Default_config_file, - default_dataset, download_weight_datasets, - update_config_file, get_root - ) +from .model_install_backend import (Dataset_path, default_config_file, + install_requested_models, + default_dataset, get_root + ) class addModelsForm(npyscreen.FormMultiPageAction): def __init__(self, parentApp, name): self.initial_models = OmegaConf.load(Dataset_path) try: - self.existing_models = OmegaConf.load(Default_config_file) + self.existing_models = OmegaConf.load(default_config_file()) except: self.existing_models = dict() self.starter_model_list = [ @@ -51,7 +60,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): x for x in list(self.initial_models.keys()) if x in self.existing_models ] ) - + self.nextrely -= 1 self.add_widget_intelligent( npyscreen.FixedText, value='Use ctrl-N and ctrl-P to move to the ext and
revious fields,', @@ -62,7 +71,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): value='cursor arrows to make a selection, and space to toggle checkboxes.', editable=False, ) - + self.nextrely += 1 if len(self.installed_models) > 0: self.add_widget_intelligent( npyscreen.TitleFixedText, @@ -83,10 +92,15 @@ class addModelsForm(npyscreen.FormMultiPageAction): slow_scroll=True, scroll_exit = True, ) - + self.purge_deleted = self.add_widget_intelligent( + npyscreen.Checkbox, + name='Purge deleted models from disk', + value=False, + scroll_exit=True + ) self.add_widget_intelligent( npyscreen.TitleFixedText, - name="== UNINSTALLED STARTER MODELS ==", + name="== UNINSTALLED STARTER MODELS (recommended models selected) ==", value="Select from a starter set of Stable Diffusion models from HuggingFace:", begin_entry_at=2, editable=False, @@ -99,42 +113,16 @@ class addModelsForm(npyscreen.FormMultiPageAction): values=starter_model_labels, value=[ self.starter_model_list.index(x) - for x in self.initial_models + for x in self.starter_model_list if x in recommended_models ], max_height=len(starter_model_labels) + 1, relx = 4, scroll_exit=True, ) - self.add_widget_intelligent( - npyscreen.TitleFixedText, - name='== MODEL IMPORT DIRECTORY ==', - value='Import all models found in this directory ( autocompletes):', - begin_entry_at=2, - editable=False, - color="CONTROL", - ) - self.autoload_directory = self.add_widget_intelligent( - npyscreen.TitleFilename, - name='Directory:', - select_dir=True, - must_exist=True, - use_two_lines=False, - relx = 4, - labelColor='DANGER', - scroll_exit=True, - ) - self.autoscan_on_startup = self.add_widget_intelligent( - npyscreen.Checkbox, - name='Scan this directory each time InvokeAI starts for new models to import.', - value=False, - relx = 4, - scroll_exit=True, - ) - self.nextrely += 1 for line in [ - '== INDIVIDUAL MODELS TO IMPORT ==', - 'Enter list of URLs, paths models or HuggingFace diffusers repository IDs.', + '== IMPORT LOCAL AND REMOTE MODELS ==', + 'Enter URLs, file paths, or HuggingFace diffusers repository IDs separated by spaces.', 'Use control-V or shift-control-V to paste:' ]: self.add_widget_intelligent( @@ -151,7 +139,29 @@ class addModelsForm(npyscreen.FormMultiPageAction): editable=True, relx=4 ) - self.nextrely += 2 + self.nextrely += 1 + self.show_directory_fields= self.add_widget_intelligent( + npyscreen.FormControlCheckbox, + name='Select a directory for models to import', + value=False, + ) + self.autoload_directory = self.add_widget_intelligent( + npyscreen.TitleFilename, + name='Directory ( autocompletes):', + select_dir=True, + must_exist=True, + use_two_lines=False, + labelColor='DANGER', + begin_entry_at=34, + scroll_exit=True, + ) + self.autoscan_on_startup = self.add_widget_intelligent( + npyscreen.Checkbox, + name='Scan this directory each time InvokeAI starts for new models to import', + value=False, + relx = 4, + scroll_exit=True, + ) self.convert_models = self.add_widget_intelligent( npyscreen.TitleSelectOne, name='== CONVERT IMPORTED MODELS INTO DIFFUSERS==', @@ -160,15 +170,12 @@ class addModelsForm(npyscreen.FormMultiPageAction): begin_entry_at=4, scroll_exit=True, ) + for i in [self.autoload_directory,self.autoscan_on_startup]: + self.show_directory_fields.addVisibleWhenSelected(i) def resize(self): super().resize() self.models_selected.values = self._get_starter_model_labels() - # thought this would dynamically resize the widget, but no luck - # self.previously_installed_models.columns = self._get_columns() - # 
self.previously_installed_models.max_height = 2+len(self.installed_models) // self._get_columns() - # self.previously_installed_models.make_contained_widgets() - # self.previously_installed_models.display() def _get_starter_model_labels(self)->List[str]: window_height, window_width = curses.initscr().getmaxyx() @@ -177,13 +184,13 @@ class addModelsForm(npyscreen.FormMultiPageAction): spacing_width = 2 description_width = window_width - label_width - checkbox_width - spacing_width im = self.initial_models - names = list(im.keys()) + names = self.starter_model_list descriptions = [im[x].description [0:description_width-3]+'...' if len(im[x].description) > description_width else im[x].description - for x in im] + for x in names] return [ - f"%-{label_width}s %s" % (names[x], descriptions[x]) for x in range(0,len(im)) + f"%-{label_width}s %s" % (names[x], descriptions[x]) for x in range(0,len(names)) ] def _get_columns(self)->int: @@ -191,7 +198,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): return 4 if window_width > 240 else 3 if window_width>160 else 2 if window_width>80 else 1 def on_ok(self): - self.parentApp.setNextForm('MONITOR_OUTPUT') + self.parentApp.setNextForm(None) self.editing = False self.parentApp.user_cancelled = False self.marshall_arguments() @@ -213,8 +220,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): .convert_to_diffusers: if True, convert legacy checkpoints into diffusers ''' # starter models to install/remove - model_names = list(self.initial_models.keys()) - starter_models = dict(map(lambda x: (model_names[x], True), self.models_selected.value)) + starter_models = dict(map(lambda x: (self.starter_model_list[x], True), self.models_selected.value)) if hasattr(self,'previously_installed_models'): unchecked = [ self.previously_installed_models.values[x] @@ -224,52 +230,100 @@ class addModelsForm(npyscreen.FormMultiPageAction): starter_models.update( map(lambda x: (x, False), unchecked) ) + self.parentApp.purge_deleted_models = self.purge_deleted.value self.parentApp.starter_models=starter_models # load directory and whether to scan on startup - self.parentApp.scan_directory = self.autoload_directory.value - self.parentApp.autoscan_on_startup = self.autoscan_on_startup.value + if self.show_directory_fields.value: + self.parentApp.scan_directory = self.autoload_directory.value + self.parentApp.autoscan_on_startup = self.autoscan_on_startup.value + else: + self.parentApp.scan_directory = None + self.parentApp.autoscan_on_startup = False # URLs and the like self.parentApp.import_model_paths = self.import_model_paths.value.split() self.parentApp.convert_to_diffusers = self.convert_models.value != 0 -class Log(object): - def __init__(self, writable): - self.writable = writable +# big chunk of dead code +# was intended to be a status area in which output of installation steps (including tqdm) was logged in real time +# Problem is that this requires a fork() and pipe, and not sure this will work properly on windows. 
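# Illustrative sketch (not part of the patch): the same capture can be done
# portably with the standard library, sidestepping the fork()/pipe concern
# noted above. Anything with a write() method, such as the outputForm,
# could stand in for the StringIO buffer.
import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("captured for display in a status widget")
captured_text = buf.getvalue()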
+# So not using, but keep this here in case it is useful later +# class Log(object): +# def __init__(self, writable): +# self.writable = writable - def __enter__(self): - self._stdout = sys.stdout - sys.stdout = self.writable - return self - def __exit__(self, *args): - sys.stdout = self._stdout +# def __enter__(self): +# self._stdout = sys.stdout +# sys.stdout = self.writable +# return self + +# def __exit__(self, *args): +# sys.stdout = self._stdout -class outputForm(npyscreen.ActionForm): - def create(self): - self.buffer = self.add_widget( - npyscreen.BufferPager, - editable=False, - ) +# class outputForm(npyscreen.ActionForm): +# def create(self): +# self.done = False +# self.buffer = self.add_widget( +# npyscreen.BufferPager, +# editable=False, +# ) - def write(self,string): - if string != '\n': - self.buffer.buffer([string]) +# def write(self,string): +# if string != '\n': +# self.buffer.buffer([string]) - def beforeEditing(self): - myapplication = self.parentApp - with Log(self): - print(f'DEBUG: these models will be removed: {[x for x in myapplication.starter_models if not myapplication.starter_models[x]]}') - print(f'DEBUG: these models will be installed: {[x for x in myapplication.starter_models if myapplication.starter_models[x]]}') - print(f'DEBUG: this directory will be scanned: {myapplication.scan_directory}') - print(f'DEBUG: scan at startup time? {myapplication.autoscan_on_startup}') - print(f'DEBUG: these things will be downloaded: {myapplication.import_model_paths}') - print(f'DEBUG: convert to diffusers? {myapplication.convert_to_diffusers}') +# def beforeEditing(self): +# if self.done: +# return +# installApp = self.parentApp +# with Log(self): +# models_to_remove = [x for x in installApp.starter_models if not installApp.starter_models[x]] +# models_to_install = [x for x in installApp.starter_models if installApp.starter_models[x]] +# directory_to_scan = installApp.scan_directory +# scan_at_startup = installApp.autoscan_on_startup +# potential_models_to_install = installApp.import_model_paths +# convert_to_diffusers = installApp.convert_to_diffusers + +# print(f'these models will be removed: {models_to_remove}') +# print(f'these models will be installed: {models_to_install}') +# print(f'this directory will be scanned: {directory_to_scan}') +# print(f'these things will be downloaded: {potential_models_to_install}') +# print(f'scan at startup time? {scan_at_startup}') +# print(f'convert to diffusers? 
{convert_to_diffusers}') +# print(f'\nPress OK to proceed or Cancel.') + +# def on_cancel(self): +# self.buffer.buffer(['goodbye!']) +# self.parentApp.setNextForm(None) +# self.editing = False + +# def on_ok(self): +# if self.done: +# self.on_cancel() +# return + +# installApp = self.parentApp +# with Log(self): +# models_to_remove = [x for x in installApp.starter_models if not installApp.starter_models[x]] +# models_to_install = [x for x in installApp.starter_models if installApp.starter_models[x]] +# directory_to_scan = installApp.scan_directory +# scan_at_startup = installApp.autoscan_on_startup +# potential_models_to_install = installApp.import_model_paths +# convert_to_diffusers = installApp.convert_to_diffusers + +# install_requested_models( +# install_initial_models = models_to_install, +# remove_models = models_to_remove, +# scan_directory = Path(directory_to_scan) if directory_to_scan else None, +# external_models = potential_models_to_install, +# scan_at_startup = scan_at_startup, +# convert_to_diffusers = convert_to_diffusers, +# precision = 'float32' if installApp.opt.full_precision else choose_precision(torch.device(choose_torch_device())), +# config_file_path = Path(installApp.opt.config_file) if installApp.opt.config_file else None, +# ) +# self.done = True - def on_ok(self): - self.buffer.buffer(['goodbye!']) - self.parentApp.setNextForm(None) - self.editing = False class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, saved_args=None): @@ -283,54 +337,43 @@ class AddModelApplication(npyscreen.NPSAppManaged): addModelsForm, name="Add/Remove Models", ) - self.output = self.addForm( - 'MONITOR_OUTPUT', - outputForm, - name='Model Install Output' - ) + # self.output = self.addForm( + # 'MONITOR_OUTPUT', + # outputForm, + # name='Model Install Output' + # ) +# -------------------------------------------------------- +def process_and_execute(app: npyscreen.NPSAppManaged): + models_to_remove = [x for x in app.starter_models if not app.starter_models[x]] + models_to_install = [x for x in app.starter_models if app.starter_models[x]] + directory_to_scan = app.scan_directory + scan_at_startup = app.autoscan_on_startup + potential_models_to_install = app.import_model_paths + convert_to_diffusers = app.convert_to_diffusers + + install_requested_models( + install_initial_models = models_to_install, + remove_models = models_to_remove, + scan_directory = Path(directory_to_scan) if directory_to_scan else None, + external_models = potential_models_to_install, + scan_at_startup = scan_at_startup, + convert_to_diffusers = convert_to_diffusers, + precision = 'float32' if app.opt.full_precision else choose_precision(torch.device(choose_torch_device())), + purge_deleted = app.purge_deleted_models, + config_file_path = Path(app.opt.config_file) if app.opt.config_file else None, + ) + # -------------------------------------------------------- def select_and_download_models(opt: Namespace): if opt.default_only: models_to_download = default_dataset() + install_requested_models(models_to_download) else: - myapplication = AddModelApplication() - myapplication.run() - if not myapplication.user_cancelled: - print(f'DEBUG: these models will be removed: {[x for x in myapplication.starter_models if not myapplication.starter_models[x]]}') - print(f'DEBUG: these models will be installed: {[x for x in myapplication.starter_models if myapplication.starter_models[x]]}') - print(f'DEBUG: this directory will be scanned: {myapplication.scan_directory}') - print(f'DEBUG: scan at startup time? 
{myapplication.autoscan_on_startup}')
-            print(f'DEBUG: these things will be downloaded: {myapplication.import_model_paths}')
-            print(f'DEBUG: convert to diffusers? {myapplication.convert_to_diffusers}')
-        sys.exit(0)
-
-    if not models_to_download:
-        print(
-            '** No models were selected. To run this program again, select "Install initial models" from the invoke script.'
-        )
-        return
-
-    print("** Downloading and installing the selected models.")
-    precision = (
-        "float32"
-        if opt.full_precision
-        else choose_precision(torch.device(choose_torch_device()))
-    )
-    successfully_downloaded = download_weight_datasets(
-        models=models_to_download,
-        access_token=None,
-        precision=precision,
-    )
-
-    update_config_file(successfully_downloaded, opt)
-    if len(successfully_downloaded) < len(models_to_download):
-        print("** Some of the model downloads were not successful")
-
-    print(
-        "\nYour starting models were installed. To find and add more models, see https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS"
-    )
-
+        installApp = AddModelApplication()
+        installApp.opt = opt
+        installApp.run()
+        process_and_execute(installApp)

 # -------------------------------------
 def main():
@@ -392,7 +435,7 @@ def main():
                 '** Insufficient horizontal space for the interface. Please make your window wider and try again.'
             )
         else:
-            print(f"** A layout error has occurred: {str(e)}")
+            print(f"** An error has occurred: {str(e)}")
         traceback.print_exc()
         sys.exit(-1)

diff --git a/ldm/invoke/config/model_install_util.py b/ldm/invoke/config/model_install_backend.py
similarity index 72%
rename from ldm/invoke/config/model_install_util.py
rename to ldm/invoke/config/model_install_backend.py
index 1182815525..6a50dc5fcb 100644
--- a/ldm/invoke/config/model_install_util.py
+++ b/ldm/invoke/config/model_install_backend.py
@@ -1,35 +1,29 @@
-'''
+"""
 Utility (backend) functions used by model_install.py
-'''
-import argparse
+"""
 import os
 import re
 import shutil
 import sys
-import traceback
 import warnings
-from argparse import Namespace
-from math import ceil
 from pathlib import Path
 from tempfile import TemporaryFile

-import npyscreen
 import requests
 from diffusers import AutoencoderKL
 from huggingface_hub import hf_hub_url
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from tqdm import tqdm
+from typing import List

 import invokeai.configs as configs
-from ldm.invoke.devices import choose_precision, choose_torch_device
-from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
-from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir
-from ldm.invoke.config.widgets import MultiSelectColumns
+from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ..globals import Globals, global_cache_dir, global_config_dir
+from ..model_manager import ModelManager

 warnings.filterwarnings("ignore")
-import torch
- 
+
 # --------------------------globals-----------------------
 Model_dir = "models"
 Weights_dir = "ldm/stable-diffusion-v1/"
@@ -37,12 +31,11 @@ Weights_dir = "ldm/stable-diffusion-v1/"
 # the initial "configs" dir is now bundled in the `invokeai.configs` package
 Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"

-Default_config_file = Path(global_config_dir()) / "models.yaml"
-SD_Configs = Path(global_config_dir()) / "stable-diffusion"
+# initial models omegaconf
+Datasets = None

-Datasets = OmegaConf.load(Dataset_path)
-
-Config_preamble = """# This file describes the alternative machine learning models
+Config_preamble = """ +# This file describes the alternative machine learning models # available to InvokeAI script. # # To add a new model, follow the examples below. Each @@ -51,6 +44,60 @@ Config_preamble = """# This file describes the alternative machine learning mode # was trained on. """ +def default_config_file(): + return Path(global_config_dir()) / "models.yaml" + +def sd_configs(): + return Path(global_config_dir()) / "stable-diffusion" + +def initial_models(): + global Datasets + if Datasets: + return Datasets + return (Datasets := OmegaConf.load(Dataset_path)) + + +def install_requested_models( + install_initial_models: List[str] = None, + remove_models: List[str] = None, + scan_directory: Path = None, + external_models: List[str] = None, + scan_at_startup: bool = False, + convert_to_diffusers: bool = False, + precision: str = "float16", + purge_deleted: bool = False, + config_file_path: Path = None, +): + config_file_path =config_file_path or default_config_file() + model_manager = ModelManager(OmegaConf.load(config_file_path),precision=precision) + + if remove_models and len(remove_models) > 0: + print("== DELETING UNCHECKED STARTER MODELS ==") + for model in remove_models: + print(f'{model}...') + model_manager.del_model(model, delete_files=purge_deleted) + model_manager.commit(config_file_path) + + if install_initial_models and len(install_initial_models) > 0: + print("== INSTALLING SELECTED STARTER MODELS ==") + successfully_downloaded = download_weight_datasets( + models=install_initial_models, + access_token=None, + precision=precision, + ) # for historical reasons, we don't use model manager here + update_config_file(successfully_downloaded, config_file_path) + if len(successfully_downloaded) < len(install_initial_models): + print("** Some of the model downloads were not successful") + + if external_models and len(external_models)>0: + print("== INSTALLING EXTERNAL MODELS ==") + for path_url_or_repo in external_models: + model_manager.heuristic_import( + path_url_or_repo, + convert=convert_to_diffusers, + commit_to_conf=config_file_path + ) + # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): default = "y" if default_yes else "n" @@ -60,6 +107,7 @@ def yes_or_no(prompt: str, default_yes=True): else: return response[0] in ("y", "Y") + # ------------------------------------- def get_root(root: str = None) -> str: if root: @@ -69,11 +117,12 @@ def get_root(root: str = None) -> str: else: return Globals.root + # --------------------------------------------- def recommended_datasets() -> dict: datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("recommended", False): + for ds in initial_models().keys(): + if initial_models()[ds].get("recommended", False): datasets[ds] = True return datasets @@ -81,8 +130,8 @@ def recommended_datasets() -> dict: # --------------------------------------------- def default_dataset() -> dict: datasets = dict() - for ds in Datasets.keys(): - if Datasets[ds].get("default", False): + for ds in initial_models().keys(): + if initial_models()[ds].get("default", False): datasets[ds] = True return datasets @@ -90,7 +139,7 @@ def default_dataset() -> dict: # --------------------------------------------- def all_datasets() -> dict: datasets = dict() - for ds in Datasets.keys(): + for ds in initial_models().keys(): datasets[ds] = True return datasets @@ -102,26 +151,24 @@ def migrate_models_ckpt(): model_path = os.path.join(Globals.root, Model_dir, Weights_dir) if not 
os.path.exists(os.path.join(model_path, "model.ckpt")):
         return

-    new_name = Datasets["stable-diffusion-1.4"]["file"]
-    print('You seem to have the Stable Diffusion v4.1 "model.ckpt" already installed.')
-    rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?')
-    if rename:
-        print(f"model.ckpt => {new_name}")
-        os.replace(
-            os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name)
-        )
+    new_name = initial_models()["stable-diffusion-1.4"]["file"]
+    print(f'The Stable Diffusion v1.4 "model.ckpt" is already installed. The name will be changed to {new_name} to avoid confusion.')
+    print(f"model.ckpt => {new_name}")
+    os.replace(
+        os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name)
+    )


 # ---------------------------------------------
 def download_weight_datasets(
-    models: dict, access_token: str, precision: str = "float32"
+    models: List[str], access_token: str, precision: str = "float32"
 ):
     migrate_models_ckpt()
     successful = dict()
-    for mod in models.keys():
+    for mod in models:
         print(f"Downloading {mod}:")
         successful[mod] = _download_repo_or_file(
-            Datasets[mod], access_token, precision=precision
+            initial_models()[mod], access_token, precision=precision
         )
     return successful

@@ -255,21 +302,21 @@ def hf_download_with_resume(


 # ---------------------------------------------
-def update_config_file(successfully_downloaded: dict, opt: dict):
+def update_config_file(successfully_downloaded: dict, config_file: Path):
     config_file = (
-        Path(opt.config_file) if opt.config_file is not None else Default_config_file
+        Path(config_file) if config_file is not None else default_config_file()
     )

     # In some cases (incomplete setup, etc), the default configs directory might be missing.
     # Create it if it doesn't exist.
     # this check is ignored if opt.config_file is specified - user is assumed to know what they
     # are doing if they are passing a custom config file from elsewhere. 
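For orientation, the stanza dictionaries assembled by new_config_file_contents() below serialize into models.yaml entries of roughly the following shape. This is an illustrative sketch only: the model name, description and paths are invented, and only fields the stanza-building code actually sets are shown.

from omegaconf import OmegaConf

# Hypothetical models.yaml entry for a downloaded .ckpt starter model.
stanza = {
    "description": "Stable Diffusion inference model version 1.5",
    "repo_id": "runwayml/stable-diffusion-v1-5",
    "format": "ckpt",
    "width": 512,
    "height": 512,
    "weights": "models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt",
    "config": "configs/stable-diffusion/v1-inference.yaml",
}
print(OmegaConf.to_yaml(OmegaConf.create({"stable-diffusion-1.5": stanza})))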
-    if config_file is Default_config_file and not config_file.parent.exists():
+    if config_file is default_config_file() and not config_file.parent.exists():
         configs_src = Dataset_path.parent
-        configs_dest = Default_config_file.parent
+        configs_dest = default_config_file().parent
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)

-    yaml = new_config_file_contents(successfully_downloaded, config_file, opt)
+    yaml = new_config_file_contents(successfully_downloaded, config_file)

     try:
         backup = None
@@ -306,7 +353,7 @@ def update_config_file(successfully_downloaded: dict, opt: dict):

 # ---------------------------------------------
 def new_config_file_contents(
-    successfully_downloaded: dict, config_file: Path, opt: dict
+    successfully_downloaded: dict, config_file: Path,
 ) -> str:
     if config_file.exists():
         conf = OmegaConf.load(str(config_file.expanduser().resolve()))
@@ -319,10 +366,10 @@ def new_config_file_contents(
         # version of the model was previously defined, and whether the current
         # model is a diffusers (indicated with a path)
         if conf.get(model) and Path(successfully_downloaded[model]).is_dir():
-            offer_to_delete_weights(model, conf[model], opt.yes_to_all)
+            delete_weights(model, conf[model])

         stanza = {}
-        mod = Datasets[model]
+        mod = initial_models()[model]
         stanza["description"] = mod["description"]
         stanza["repo_id"] = mod["repo_id"]
         stanza["format"] = mod["format"]
@@ -336,7 +383,7 @@ def new_config_file_contents(
             stanza["weights"] = os.path.relpath(
                 successfully_downloaded[model], start=Globals.root
             )
-            stanza["config"] = os.path.normpath(os.path.join(SD_Configs, mod["config"]))
+            stanza["config"] = os.path.normpath(os.path.join(sd_configs(), mod["config"]))
             if "vae" in mod:
                 if "file" in mod["vae"]:
                     stanza["vae"] = os.path.normpath(
@@ -359,20 +406,20 @@ def new_config_file_contents(


 # ---------------------------------------------
-def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool):
+def delete_weights(model_name: str, conf_stanza: dict):
     if not (weights := conf_stanza.get("weights")):
         return
     if re.match("/VAE/", conf_stanza.get("config")):
         return
-    if yes_to_all or yes_or_no(
-        f"\n** The checkpoint version of {model_name} is superseded by the diffusers version. Delete the original file {weights}?",
-        default_yes=False,
-    ):
-        weights = Path(weights)
-        if not weights.is_absolute():
-            weights = Path(Globals.root) / weights
+
+    print(
+        f"\n** The checkpoint version of {model_name} is superseded by the diffusers version. Deleting the original file {weights}."
+ ) + + weights = Path(weights) + if not weights.is_absolute(): + weights = Path(Globals.root) / weights try: weights.unlink() except OSError as e: print(str(e)) - diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 0f3affe5f7..7042c9eb90 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -11,6 +11,7 @@ import gc import hashlib import io import os +import re import sys import textwrap import time @@ -699,7 +700,78 @@ class ModelManager(object): self.commit(commit_to_conf) return True - def autoconvert_weights( + def heuristic_import( + self, + path_url_or_repo: str, + convert: bool= False, + commit_to_conf: Path=None, + ): + model_path = None + thing = path_url_or_repo # to save typing + + if thing.startswith(('http:','https:','ftp:')): + print(f'* {thing} appears to be a URL') + model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') # _resolve_path does a download if needed + + elif Path(thing).is_file() and thing.endswith(('.ckpt','.safetensors')): + print(f'* {thing} appears to be a checkpoint file on disk') + model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') + + elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists(): + print(f'* {thing} appears to be a diffusers file on disk') + self.import_diffusers_model(thing, commit_to_conf=commit_to_conf) + + elif Path(thing).is_dir(): + print(f'* {thing} appears to be a directory. Will scan for models to import') + for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')): + self.heuristic_import(m, convert, commit_to_conf=commit_to_conf) + return + + elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing): + print(f'* {thing} appears to be a HuggingFace diffusers repo_id') + self.import_diffuser_model(thing, commit_to_conf=commit_to_conf) + + else: + print(f"* {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id") + + # Model_path is set in the event of a legacy checkpoint file. + # If not set, we're all done + if not model_path: + return + + # another round of heuristics to guess the correct config file. 
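The heuristics that follow hinge on the shape of a single cross-attention tensor. A standalone sketch of the same check (illustrative, not part of the patch; it assumes torch and safetensors are importable and tolerates checkpoints that nest their weights under a state_dict key):

from pathlib import Path

import safetensors.torch
import torch

# SD-2 checkpoints carry 1024-wide text-conditioning weights at this key,
# while SD-1 checkpoints carry 768-wide ones.
V2_KEY = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"

def looks_like_sd2(checkpoint_path: Path) -> bool:
    if checkpoint_path.suffix == ".safetensors":
        weights = safetensors.torch.load_file(checkpoint_path)
    else:
        weights = torch.load(checkpoint_path, map_location="cpu")
        weights = weights.get("state_dict", weights)
    return V2_KEY in weights and weights[V2_KEY].shape[-1] == 1024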
+ model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') + + checkpoint = safetensors.torch.load_file(model_path) if model_path.suffix == '.safetensors' else torch.load(model_path) + key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: + print(f'* {thing} appears to be an SD-v2 model; model will be converted to diffusers format') + model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml') + convert = True + elif re.search('inpaint', model_path, flags=re.IGNORECASE): + print(f'* {thing} appears to be an SD-v1 inpainting model') + model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') + else: + print(f'* {thing} appears to be an SD-v1 model') + + if convert: + diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir, model_path.stem) + self.convert_and_import( + model_path, + diffusers_path=diffuser_path, + vae=dict(repo_id='stabilityai/sd-vae-ft-mse'), + original_config_file=model_config_file, + commit_to_conf=commit_to_conf, + ) + else: + self.import_ckpt_model( + model_path, + config=model_config_file, + vae=Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt'), + commit_to_conf=commit_to_conf, + ) + + def autoconvert_weights ( self, conf_path: Path, weights_directory: Path = None, @@ -750,7 +822,6 @@ class ModelManager(object): into models.yaml. """ new_config = None - import transformers from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser From 07be605dcb9e2d47ba03acea826c16d7d2717de0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 01:30:59 -0500 Subject: [PATCH 08/36] mostly working --- ldm/invoke/config/model_install.py | 2 +- ldm/invoke/config/model_install_backend.py | 21 +++++++++++++-------- ldm/invoke/generator/diffusers_pipeline.py | 1 + ldm/invoke/model_manager.py | 20 ++++++++++++++------ ldm/util.py | 2 -- 5 files changed, 29 insertions(+), 17 deletions(-) diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 77de6cee21..9c4a4d0e61 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -243,7 +243,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): # URLs and the like self.parentApp.import_model_paths = self.import_model_paths.value.split() - self.parentApp.convert_to_diffusers = self.convert_models.value != 0 + self.parentApp.convert_to_diffusers = self.convert_models.value == 1 # big chunk of dead code # was intended to be a status area in which output of installation steps (including tqdm) was logged in real time diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index 6a50dc5fcb..451e9b7e2b 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -68,8 +68,8 @@ def install_requested_models( purge_deleted: bool = False, config_file_path: Path = None, ): - config_file_path =config_file_path or default_config_file() - model_manager = ModelManager(OmegaConf.load(config_file_path),precision=precision) + config_file_path=config_file_path or default_config_file() + model_manager= ModelManager(OmegaConf.load(config_file_path),precision=precision) if remove_models and len(remove_models) > 0: print("== DELETING UNCHECKED STARTER MODELS ==") @@ -92,12 +92,17 @@ def install_requested_models( if external_models and 
len(external_models)>0: print("== INSTALLING EXTERNAL MODELS ==") for path_url_or_repo in external_models: - model_manager.heuristic_import( - path_url_or_repo, - convert=convert_to_diffusers, - commit_to_conf=config_file_path - ) - + try: + model_manager.heuristic_import( + path_url_or_repo, + convert=convert_to_diffusers, + commit_to_conf=config_file_path + ) + except KeyboardInterrupt: + sys.exit(-1) + except Exception: + pass + # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): default = "y" if default_yes else "n" diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py index 24626247cf..3bf777e23d 100644 --- a/ldm/invoke/generator/diffusers_pipeline.py +++ b/ldm/invoke/generator/diffusers_pipeline.py @@ -2,6 +2,7 @@ from __future__ import annotations import dataclasses import inspect +import psutil import secrets import sys from dataclasses import dataclass, field diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 3f42161045..49213c29c7 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -643,7 +643,7 @@ class ModelManager(object): self.add_model(model_name, new_config, True) if commit_to_conf: self.commit(commit_to_conf) - return True + return model_name def import_ckpt_model( self, @@ -709,6 +709,8 @@ class ModelManager(object): ): model_path = None thing = path_url_or_repo # to save typing + + print(f'here i am; thing={thing}, convert={convert}') if thing.startswith(('http:','https:','ftp:')): print(f'* {thing} appears to be a URL') @@ -720,17 +722,23 @@ class ModelManager(object): elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists(): print(f'* {thing} appears to be a diffusers file on disk') - self.import_diffusers_model(thing, commit_to_conf=commit_to_conf) + model_name = self.import_diffusers_model( + thing, + vae=dict(repo_id='stabilityai/sd-vae-ft-mse'), + commit_to_conf=commit_to_conf + ) elif Path(thing).is_dir(): print(f'* {thing} appears to be a directory. Will scan for models to import') for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')): - self.heuristic_import(m, convert, commit_to_conf=commit_to_conf) + print('***',m) + self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf) return elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing): print(f'* {thing} appears to be a HuggingFace diffusers repo_id') - self.import_diffuser_model(thing, commit_to_conf=commit_to_conf) + model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf) + pipeline,_,_,_ = self._load_diffusers_model(self.config[model_name]) else: print(f"* {thing}: Unknown thing. 
Please provide a URL, file path, directory or HuggingFace repo_id") @@ -749,7 +757,7 @@ class ModelManager(object): print(f'* {thing} appears to be an SD-v2 model; model will be converted to diffusers format') model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml') convert = True - elif re.search('inpaint', model_path, flags=re.IGNORECASE): + elif re.search('inpaint', str(model_path), flags=re.IGNORECASE): print(f'* {thing} appears to be an SD-v1 inpainting model') model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') else: @@ -833,7 +841,7 @@ class ModelManager(object): return model_name = model_name or diffusers_path.name - model_description = model_description or "Optimized version of {model_name}" + model_description = model_description or f"Optimized version of {model_name}" print(f">> Optimizing {model_name} (30-60s)") try: # By passing the specified VAE to the conversion function, the autoencoder diff --git a/ldm/util.py b/ldm/util.py index d6d2c9e170..1bdcaba0d3 100644 --- a/ldm/util.py +++ b/ldm/util.py @@ -318,8 +318,6 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path else: dest.parent.mkdir(parents=True, exist_ok=True) - print(f'DEBUG: after many manipulations, dest={dest}') - header = {"Authorization": f"Bearer {access_token}"} if access_token else {} open_mode = "wb" exist_size = 0 From b1341bc611de23136582ca230c978438fabf8896 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 03:22:25 -0500 Subject: [PATCH 09/36] fully functional and ready for review - quashed multiple bugs in model conversion and importing - found old issue in handling of resume of interrupted downloads - will require extensive testing --- ldm/invoke/CLI.py | 19 ++++++----- ldm/invoke/args.py | 8 ++++- ldm/invoke/config/invokeai_configure.py | 6 ++-- ldm/invoke/config/model_install.py | 5 +-- ldm/invoke/config/model_install_backend.py | 27 +++++++++++++-- ldm/invoke/model_manager.py | 37 +++++++++++--------- ldm/util.py | 39 +++++++++++++--------- 7 files changed, 91 insertions(+), 50 deletions(-) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index a7cafe7571..2bfd29581d 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -142,12 +142,11 @@ def main(): report_model_error(opt, e) # try to autoconvert new models - # autoimport new .ckpt files + if path := opt.autoimport: + gen.model_manager.heuristic_import(str(path), convert=False, commit_to_conf=opt.conf) + if path := opt.autoconvert: - gen.model_manager.autoconvert_weights( - conf_path=opt.conf, - weights_directory=path, - ) + gen.model_manager.heuristic_import(str(path), convert=True, commit_to_conf=opt.conf) # web server loops forever if opt.web or opt.gui: @@ -581,7 +580,7 @@ def import_model(model_path: str, gen, opt, completer): (3) a huggingface repository id; or (4) a local directory containing a diffusers model. 
""" - model.path = model_path.replace('\\','/') # windows + model_path = model_path.replace('\\','/') # windows model_name = None if model_path.startswith(('http:','https:','ftp:')): @@ -653,7 +652,7 @@ def import_checkpoint_list(models: List[Path], gen, opt, completer)->List[str]: print(f'>> Model {model.stem} imported successfully') model_names.append(model_name) else: - printf('** Model {model} failed to import') + print(f'** Model {model} failed to import') print() return model_names @@ -709,7 +708,8 @@ def import_ckpt_model( vae = input('VAE file for this model (leave blank for none): ').strip() or None done = (not vae) or os.path.exists(vae) completer.complete_extensions(None) - + config_file = _ask_for_config_file(path_or_url, completer) + if not manager.import_ckpt_model( path_or_url, config = config_file, @@ -786,7 +786,8 @@ def optimize_model(model_name_or_path: Union[Path,str], gen, opt, completer): model_name_or_path = model_name_or_path.replace('\\','/') # windows manager = gen.model_manager ckpt_path = None - + original_config_file = None + if model_name_or_path == gen.model_name: print("** Can't convert the active model. !switch to another model first. **") return diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py index d81de4f1ca..e79db94b68 100644 --- a/ldm/invoke/args.py +++ b/ldm/invoke/args.py @@ -527,11 +527,17 @@ class Args(object): default=False, help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.', ) + model_group.add_argument( + '--autoimport', + default=None, + type=str, + help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly', + ) model_group.add_argument( '--autoconvert', default=None, type=str, - help='Check the indicated directory for .ckpt weights files at startup and import as optimized diffuser models', + help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models', ) model_group.add_argument( '--patchmatch', diff --git a/ldm/invoke/config/invokeai_configure.py b/ldm/invoke/config/invokeai_configure.py index 3ff555881c..93bf73c3d7 100755 --- a/ldm/invoke/config/invokeai_configure.py +++ b/ldm/invoke/config/invokeai_configure.py @@ -31,10 +31,8 @@ from transformers import ( ) import invokeai.configs as configs -from ldm.invoke.config.model_install import ( - download_from_hf, - select_and_download_models, -) +from ldm.invoke.config.model_install_backend import download_from_hf +from ldm.invoke.config.model_install import select_and_download_models from ldm.invoke.globals import Globals, global_config_dir from ldm.invoke.readline import generic_completer diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 9c4a4d0e61..43b6aed1dd 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -100,7 +100,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): ) self.add_widget_intelligent( npyscreen.TitleFixedText, - name="== UNINSTALLED STARTER MODELS (recommended models selected) ==", + name="== STARTER MODELS (recommended ones selected) ==", value="Select from a starter set of Stable Diffusion models from HuggingFace:", begin_entry_at=2, editable=False, @@ -221,6 +221,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): ''' # starter models to install/remove starter_models = dict(map(lambda x: (self.starter_model_list[x], True), self.models_selected.value)) + self.parentApp.purge_deleted_models=False if 
hasattr(self,'previously_installed_models'): unchecked = [ self.previously_installed_models.values[x] @@ -243,7 +244,7 @@ class addModelsForm(npyscreen.FormMultiPageAction): # URLs and the like self.parentApp.import_model_paths = self.import_model_paths.value.split() - self.parentApp.convert_to_diffusers = self.convert_models.value == 1 + self.parentApp.convert_to_diffusers = self.convert_models.value[0] == 1 # big chunk of dead code # was intended to be a status area in which output of installation steps (including tqdm) was logged in real time diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index 451e9b7e2b..cdcef3bb6c 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -69,6 +69,9 @@ def install_requested_models( config_file_path: Path = None, ): config_file_path=config_file_path or default_config_file() + if not config_file_path.exists(): + open(config_file_path,'w') + model_manager= ModelManager(OmegaConf.load(config_file_path),precision=precision) if remove_models and len(remove_models) > 0: @@ -84,12 +87,20 @@ def install_requested_models( models=install_initial_models, access_token=None, precision=precision, - ) # for historical reasons, we don't use model manager here + ) # FIX: for historical reasons, we don't use model manager here update_config_file(successfully_downloaded, config_file_path) if len(successfully_downloaded) < len(install_initial_models): print("** Some of the model downloads were not successful") - if external_models and len(external_models)>0: + # due to above, we have to reload the model manager because conf file + # was changed behind its back + model_manager= ModelManager(OmegaConf.load(config_file_path),precision=precision) + + external_models = external_models or list() + if scan_directory: + external_models.append(str(scan_directory)) + + if len(external_models)>0: print("== INSTALLING EXTERNAL MODELS ==") for path_url_or_repo in external_models: try: @@ -102,6 +113,18 @@ def install_requested_models( sys.exit(-1) except Exception: pass + + if scan_at_startup and scan_directory.is_dir(): + argument = '--autoconvert' if convert_to_diffusers else '--autoimport' + initfile = Path(Globals.root, Globals.initfile) + replacement = Path(Globals.root, f'{Globals.initfile}.new') + with open(initfile,'r') as input: + with open(replacement,'w') as output: + while line := input.readline(): + if not line.startswith(argument): + output.writelines([line]) + output.writelines([f'{argument} {str(scan_directory)}']) + os.replace(replacement,initfile) # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 49213c29c7..b46ba4d90a 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -707,21 +707,19 @@ class ModelManager(object): convert: bool= False, commit_to_conf: Path=None, ): - model_path = None + model_path: Path = None thing = path_url_or_repo # to save typing - print(f'here i am; thing={thing}, convert={convert}') - if thing.startswith(('http:','https:','ftp:')): - print(f'* {thing} appears to be a URL') + print(f'>> {thing} appears to be a URL') model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') # _resolve_path does a download if needed elif Path(thing).is_file() and thing.endswith(('.ckpt','.safetensors')): - print(f'* {thing} appears to be a checkpoint file on disk') + print(f'>> {thing} appears to be a 
checkpoint file on disk') model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists(): - print(f'* {thing} appears to be a diffusers file on disk') + print(f'>> {thing} appears to be a diffusers file on disk') model_name = self.import_diffusers_model( thing, vae=dict(repo_id='stabilityai/sd-vae-ft-mse'), @@ -729,39 +727,44 @@ class ModelManager(object): ) elif Path(thing).is_dir(): - print(f'* {thing} appears to be a directory. Will scan for models to import') + print(f'>> {thing} appears to be a directory. Will scan for models to import') for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')): print('***',m) self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf) return elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing): - print(f'* {thing} appears to be a HuggingFace diffusers repo_id') + print(f'>> {thing} appears to be a HuggingFace diffusers repo_id') model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf) pipeline,_,_,_ = self._load_diffusers_model(self.config[model_name]) else: - print(f"* {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id") + print(f">> {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id") # Model_path is set in the event of a legacy checkpoint file. # If not set, we're all done if not model_path: return + if model_path.stem in self.config: #already imported + return + # another round of heuristics to guess the correct config file. - model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') + model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml') checkpoint = safetensors.torch.load_file(model_path) if model_path.suffix == '.safetensors' else torch.load(model_path) key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: - print(f'* {thing} appears to be an SD-v2 model; model will be converted to diffusers format') + print(f'>> {thing} appears to be an SD-v2 model; model will be converted to diffusers format') model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml') convert = True + elif re.search('inpaint', str(model_path), flags=re.IGNORECASE): - print(f'* {thing} appears to be an SD-v1 inpainting model') + print(f'>> {thing} appears to be an SD-v1 inpainting model') model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') + else: - print(f'* {thing} appears to be an SD-v1 model') + print(f'>> {thing} appears to be an SD-v1 model') if convert: diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir, model_path.stem) @@ -776,10 +779,12 @@ class ModelManager(object): self.import_ckpt_model( model_path, config=model_config_file, - vae=Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt'), + vae=str(Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')), commit_to_conf=commit_to_conf, ) - + + # this is a defunct method, superseded by heuristic_import() + # left here during transition def autoconvert_weights ( self, conf_path: Path, @@ -799,7 +804,7 @@ class ModelManager(object): ckpt_files = dict() for root, dirs, files in os.walk(weights_directory): for f in files: - if not f.endswith(".ckpt"): + if not 
f.endswith((".ckpt",".safetensors")): continue basename = Path(f).stem dest = Path(dest_directory, basename) diff --git a/ldm/util.py b/ldm/util.py index 1bdcaba0d3..a60106967d 100644 --- a/ldm/util.py +++ b/ldm/util.py @@ -306,8 +306,12 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path dest/filename :param access_token: Access token to access this resource ''' - resp = requests.get(url, stream=True) - total = int(resp.headers.get("content-length", 0)) + header = {"Authorization": f"Bearer {access_token}"} if access_token else {} + open_mode = "wb" + exist_size = 0 + + resp = requests.get(url, header, stream=True) + content_length = int(resp.headers.get("content-length", 0)) if dest.is_dir(): try: @@ -318,39 +322,42 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path else: dest.parent.mkdir(parents=True, exist_ok=True) - header = {"Authorization": f"Bearer {access_token}"} if access_token else {} - open_mode = "wb" - exist_size = 0 if dest.exists(): exist_size = dest.stat().st_size header["Range"] = f"bytes={exist_size}-" open_mode = "ab" + resp = requests.get(url, headers=header, stream=True) # new request with range + + if exist_size > content_length: + print('* corrupt existing file found. re-downloading') + os.remove(dest) + exist_size = 0 if ( - resp.status_code == 416 - ): # "range not satisfiable", which means nothing to return + resp.status_code == 416 or exist_size == content_length + ): print(f"* {dest}: complete file found. Skipping.") return dest + elif resp.status_code == 206 or exist_size > 0: + print(f"* {dest}: partial file found. Resuming...") elif resp.status_code != 200: print(f"** An error occurred during downloading {dest}: {resp.reason}") - elif exist_size > 0: - print(f"* {dest}: partial file found. Resuming...") else: print(f"* {dest}: Downloading...") try: - if total < 2000: + if content_length < 2000: print(f"*** ERROR DOWNLOADING {url}: {resp.text}") return None with open(dest, open_mode) as file, tqdm( - desc=str(dest), - initial=exist_size, - total=total + exist_size, - unit="iB", - unit_scale=True, - unit_divisor=1000, + desc=str(dest), + initial=exist_size, + total=content_length, + unit="iB", + unit_scale=True, + unit_divisor=1000, ) as bar: for data in resp.iter_content(chunk_size=1024): size = file.write(data) From c5cc832304b3ebff1d19c33f125337ae6972f79d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 12:52:07 -0500 Subject: [PATCH 10/36] check maximum value of python version as well as minimum --- installer/install.sh.in | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/installer/install.sh.in b/installer/install.sh.in index e78023dfcd..25b0a2035a 100755 --- a/installer/install.sh.in +++ b/installer/install.sh.in @@ -9,13 +9,16 @@ cd $scriptdir function version { echo "$@" | awk -F. 
'{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } MINIMUM_PYTHON_VERSION=3.9.0 +MAXIMUM_PYTHON_VERSION=3.11.0 PYTHON="" -for candidate in python3.10 python3.9 python3 python python3.11 ; do +for candidate in python3.10 python3.9 python3 python ; do if ppath=`which $candidate`; then python_version=$($ppath -V | awk '{ print $2 }') if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then - PYTHON=$ppath - break + if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then + PYTHON=$ppath + break + fi fi fi done From 6217edcb6c2389a8fdad97050d0613d54b4ed3e0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 12:55:13 -0500 Subject: [PATCH 11/36] tweak wording of python version requirements --- docs/installation/010_INSTALL_AUTOMATED.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/installation/010_INSTALL_AUTOMATED.md b/docs/installation/010_INSTALL_AUTOMATED.md index b7e4b03ed1..e0b38d3aa0 100644 --- a/docs/installation/010_INSTALL_AUTOMATED.md +++ b/docs/installation/010_INSTALL_AUTOMATED.md @@ -40,9 +40,10 @@ experimental versions later. this, open up a command-line window ("Terminal" on Linux and Macintosh, "Command" or "Powershell" on Windows) and type `python --version`. If Python is installed, it will print out the version - number. If it is version `3.9.1` or `3.10.x`, you meet - requirements. - + number. If it is version `3.9.*` or `3.10.*`, you meet + requirements. We do not recommend using Python 3.11 or higher, + as not all the libraries that InvokeAI depends on work properly + with this version. !!! warning "What to do if you have an unsupported version" @@ -50,8 +51,7 @@ experimental versions later. and download the appropriate installer package for your platform. We recommend [Version 3.10.9](https://www.python.org/downloads/release/python-3109/), - which has been extensively tested with InvokeAI. At this time - we do not recommend Python 3.11. + which has been extensively tested with InvokeAI. _Please select your platform in the section below for platform-specific setup requirements._ From 5d617ce63df542ea94ad4c56ea53c9f3df6c75f5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 16 Feb 2023 20:03:59 -0500 Subject: [PATCH 12/36] rebuild front end --- invokeai/frontend/stats.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/stats.html b/invokeai/frontend/stats.html index d374d5865d..9ba6f73f0d 100644 --- a/invokeai/frontend/stats.html +++ b/invokeai/frontend/stats.html @@ -6157,7 +6157,7 @@ var drawChart = (function (exports) {