From 92e512b8b680180160f8b8c174d2245d0ed0c1ed Mon Sep 17 00:00:00 2001
From: Mary Hipp
Date: Mon, 27 Mar 2023 10:49:52 -0400
Subject: [PATCH 1/3] add package mode option for i18next

---
 invokeai/frontend/web/src/i18n.ts | 36 ++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/invokeai/frontend/web/src/i18n.ts b/invokeai/frontend/web/src/i18n.ts
index 9b655b28be..faa30f7289 100644
--- a/invokeai/frontend/web/src/i18n.ts
+++ b/invokeai/frontend/web/src/i18n.ts
@@ -1,22 +1,38 @@
 import i18n from 'i18next';
 import LanguageDetector from 'i18next-browser-languagedetector';
 import Backend from 'i18next-http-backend';
-
 import { initReactI18next } from 'react-i18next';
-i18n
-  .use(Backend)
-  .use(LanguageDetector)
-  .use(initReactI18next)
-  .init({
-    fallbackLng: 'en',
-    debug: false,
-    backend: {
-      loadPath: '/locales/{{lng}}.json',
+
+import translationEN from '../dist/locales/en.json';
+
+if (import.meta.env.MODE === 'package') {
+  i18n.use(initReactI18next).init({
+    lng: 'en',
+    resources: {
+      en: { translation: translationEN },
     },
+    debug: false,
     interpolation: {
       escapeValue: false,
     },
     returnNull: false,
   });
+} else {
+  i18n
+    .use(Backend)
+    .use(LanguageDetector)
+    .use(initReactI18next)
+    .init({
+      fallbackLng: 'en',
+      debug: false,
+      backend: {
+        loadPath: '/locales/{{lng}}.json',
+      },
+      interpolation: {
+        escapeValue: false,
+      },
+      returnNull: false,
+    });
+}
 
 export default i18n;

From fe5d9ad171f5791fcd98281a86050e4c9d4d5e7a Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Mon, 27 Mar 2023 11:27:45 -0400
Subject: [PATCH 2/3] improve importation and conversion of legacy checkpoint
 files

A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in the
WebUI by importing the model, editing it, and then typing in the path to
the VAE.

To improve the user experience, the model manager's `heuristic_import()`
method has been enhanced as follows:

1. When initially called, the caller can pass a config file path, in which
case it will be used.

2. If no config file is provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename, e.g.

```
my-new-model.safetensors
my-new-model.yaml
```

The yaml file is then used as the configuration file for importation and
conversion.

3. If no such file is found, then the method opens up the checkpoint and
probes it to determine whether it is V1, V1-inpaint or V2. If it is in V1
format, then the appropriate v1-inference.yaml config file is used.
Unfortunately there are two V2 variants that cannot be distinguished by
introspection.

4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can be
provided by the caller. This callback, named `config_file_callback`,
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses this to put up a multiple-choice prompt
to the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.

5. If the config file cannot be determined, then the import is abandoned.
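As an illustration of step 4, here is a minimal sketch of how a caller
might supply the callback. It is not part of the patch itself: `manager`
stands for an already-initialized ModelManager, and the candidate config
list, prompt wording, and model path are assumptions made for the example.

```
from pathlib import Path

def choose_config_file(checkpoint_path: Path) -> Path:
    # Last-ditch callback: ask the user which legacy config file to use.
    candidates = [
        Path("configs/stable-diffusion/v1-inference.yaml"),
        Path("configs/stable-diffusion/v1-inpainting-inference.yaml"),
        Path("configs/stable-diffusion/v2-inference.yaml"),
        Path("configs/stable-diffusion/v2-inference-v.yaml"),
    ]
    print(f"Unable to determine the model type of {checkpoint_path.name}:")
    for i, candidate in enumerate(candidates, start=1):
        print(f"  {i}. {candidate}")
    choice = int(input(f"Select a config file [1-{len(candidates)}]: "))
    return candidates[choice - 1]

# `manager` is assumed to be an existing ModelManager instance.
model_name = manager.heuristic_import(
    "/path/to/my-new-model.safetensors",
    config_file_callback=choose_config_file,
)
```

The callback is only consulted after the like-named .yaml lookup and the
checkpoint probe have both failed, so front ends pay no extra cost for
models that can be identified automatically.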
The user can attach a custom VAE to the imported and converted model by
copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename, e.g.:

```
my-new-model.safetensors
my-new-model.vae.pt
```

For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted model files, so the ".pt" file can be
deleted after conversion. No facility is currently provided to swap in a
diffusers VAE at import time, but this can be done after the fact using
the WebUI's and CLI's model editing functions.
---
 .../backend/model_management/model_manager.py | 127 +++++++++++-------
 invokeai/frontend/CLI/CLI.py                  |   7 +-
 2 files changed, 77 insertions(+), 57 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 2f8380a17d..4a2bb56270 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -18,7 +18,7 @@ import warnings
 from enum import Enum
 from pathlib import Path
 from shutil import move, rmtree
-from typing import Any, Optional, Union
+from typing import Any, Optional, Union, Callable
 
 import safetensors
 import safetensors.torch
@@ -630,14 +630,13 @@ class ModelManager(object):
     def heuristic_import(
         self,
         path_url_or_repo: str,
-        convert: bool = True,
         model_name: str = None,
         description: str = None,
         model_config_file: Path = None,
         commit_to_conf: Path = None,
+        config_file_callback: Callable[[Path], Path] = None,
     ) -> str:
-        """
-        Accept a string which could be:
+        """Accept a string which could be:
         - a HF diffusers repo_id
         - a URL pointing to a legacy .ckpt or .safetensors file
         - a local path pointing to a legacy .ckpt or .safetensors file
@@ -651,16 +650,20 @@ class ModelManager(object):
         The model_name and/or description can be provided. If not, they will
         be generated automatically.
 
-        If convert is true, legacy models will be converted to diffusers
-        before importing.
-
         If commit_to_conf is provided, the newly loaded model will be written
         to the `models.yaml` file at the indicated path. Otherwise, the changes
         will only remain in memory.
 
-        The (potentially derived) name of the model is returned on success, or None
-        on failure. When multiple models are added from a directory, only the last
-        imported one is returned.
+        The routine will do its best to figure out the config file
+        needed to convert a legacy checkpoint file, but if it can't it
+        will call the config_file_callback routine, if provided. The
+        callback accepts a single argument, the Path to the checkpoint
+        file, and returns a Path to the config file to use.
+
+        The (potentially derived) name of the model is returned on
+        success, or None on failure. When multiple models are added
+        from a directory, only the last imported one is returned.
+
         """
         model_path: Path = None
         thing = path_url_or_repo  # to save typing
@@ -707,7 +710,7 @@ class ModelManager(object):
                 Path(thing).rglob("*.safetensors")
             ):
                 if model_name := self.heuristic_import(
-                    str(m), convert, commit_to_conf=commit_to_conf
+                    str(m), commit_to_conf=commit_to_conf
                 ):
                     print(f" >> {model_name} successfully imported")
                     return model_name
@@ -735,7 +738,7 @@ class ModelManager(object):
 
         # another round of heuristics to guess the correct config file.
         checkpoint = None
-        if model_path.suffix.endswith((".ckpt",".pt")):
+        if model_path.suffix in [".ckpt",".pt"]:
             self.scan_model(model_path,model_path)
             checkpoint = torch.load(model_path)
         else:
@@ -743,43 +746,62 @@ class ModelManager(object):
 
         # additional probing needed if no config file provided
         if model_config_file is None:
-            model_type = self.probe_model_type(checkpoint)
-            if model_type == SDLegacyType.V1:
-                print(" | SD-v1 model detected")
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v1-inference.yaml"
-                )
-            elif model_type == SDLegacyType.V1_INPAINT:
-                print(" | SD-v1 inpainting model detected")
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
-                )
-            elif model_type == SDLegacyType.V2_v:
-                print(
-                    " | SD-v2-v model detected; model will be converted to diffusers format"
-                )
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
-                )
-                convert = True
-            elif model_type == SDLegacyType.V2_e:
-                print(
-                    " | SD-v2-e model detected; model will be converted to diffusers format"
-                )
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v2-inference.yaml"
-                )
-                convert = True
-            elif model_type == SDLegacyType.V2:
-                print(
-                    f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
-                )
-                return
+            # look for a like-named .yaml file in same directory
+            if model_path.with_suffix(".yaml").exists():
+                model_config_file = model_path.with_suffix(".yaml")
+                print(f" | Using config file {model_config_file.name}")
+
             else:
-                print(
-                    f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
-                )
-                return
+                model_type = self.probe_model_type(checkpoint)
+                if model_type == SDLegacyType.V1:
+                    print(" | SD-v1 model detected")
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v1-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V1_INPAINT:
+                    print(" | SD-v1 inpainting model detected")
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V2_v:
+                    print(
+                        " | SD-v2-v model detected"
+                    )
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
+                    )
+                elif model_type == SDLegacyType.V2_e:
+                    print(
+                        " | SD-v2-e model detected"
+                    )
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v2-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V2:
+                    print(
+                        f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
+                    )
+                    return
+                else:
+                    print(
+                        f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
+                    )
+                    return
+
+        if not model_config_file and config_file_callback:
+            model_config_file = config_file_callback(model_path)
+
+        # despite our best efforts, we could not find a model config file, so give up
+        if not model_config_file:
+            return
+
+        # look for a custom vae, a like-named file ending with .vae in the same directory
+        vae_path = None
+        for suffix in ["pt", "ckpt", "safetensors"]:
+            if (model_path.with_suffix(f".vae.{suffix}")).exists():
+                vae_path = model_path.with_suffix(f".vae.{suffix}")
+                print(f" | Using VAE file {vae_path.name}")
+        vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
 
         diffuser_path = Path(
             Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
@@ -787,7 +809,8 @@ class ModelManager(object):
         model_name = self.convert_and_import(
             model_path,
             diffusers_path=diffuser_path,
-            vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
+            vae=vae,
+            vae_path=str(vae_path),
             model_name=model_name,
             model_description=description,
             original_config_file=model_config_file,
@@ -829,8 +852,8 @@ class ModelManager(object):
             return
 
         model_name = model_name or diffusers_path.name
-        model_description = model_description or f"Optimized version of {model_name}"
-        print(f">> Optimizing {model_name} (30-60s)")
+        model_description = model_description or f"Converted version of {model_name}"
+        print(f" | Converting {model_name} to diffusers (30-60s)")
         try:
             # By passing the specified VAE to the conversion function, the autoencoder
             # will be built into the model rather than tacked on afterward via the config file
@@ -848,7 +871,7 @@ class ModelManager(object):
                 scan_needed=scan_needed,
             )
             print(
-                f" | Success. Optimized model is now located at {str(diffusers_path)}"
+                f" | Success. Converted model is now located at {str(diffusers_path)}"
             )
             print(f" | Writing new config file entry for {model_name}")
             new_config = dict(
diff --git a/invokeai/frontend/CLI/CLI.py b/invokeai/frontend/CLI/CLI.py
index 173ff3ecc1..22e1bbd49d 100644
--- a/invokeai/frontend/CLI/CLI.py
+++ b/invokeai/frontend/CLI/CLI.py
@@ -626,7 +626,7 @@ def set_default_output_dir(opt: Args, completer: Completer):
     completer.set_default_dir(opt.outdir)
 
 
-def import_model(model_path: str, gen, opt, completer, convert=False):
+def import_model(model_path: str, gen, opt, completer):
     """
     model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path;
     (3) a huggingface repository id; or (4) a local directory containing a
@@ -657,7 +657,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
         model_path,
         model_name=model_name,
         description=model_desc,
-        convert=convert,
     )
 
     if not imported_name:
@@ -666,7 +665,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
             model_path,
             model_name=model_name,
             description=model_desc,
-            convert=convert,
             model_config_file=config_file,
         )
         if not imported_name:
@@ -757,7 +755,6 @@ def _get_model_name_and_desc(
     )
     return model_name, model_description
 
-
 def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
     model_name_or_path = model_name_or_path.replace("\\", "/")  # windows
     manager = gen.model_manager
@@ -788,7 +785,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
         )
     else:
         try:
-            import_model(model_name_or_path, gen, opt, completer, convert=True)
+            import_model(model_name_or_path, gen, opt, completer)
         except KeyboardInterrupt:
             return
 

From 9a7580dedd82227161cd0d8ad1524a5de3bf5e80 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 28 Mar 2023 00:17:20 -0400
Subject: [PATCH 3/3] fix bugs
 in online ckpt conversion of 2.0 models

This commit fixes bugs related to the on-the-fly conversion and loading of
legacy checkpoint models built on the SD-2.0 base.

- When legacy checkpoints built on SD-2.0 models were converted on-the-fly
  using --ckpt_convert, generation would crash with a precision
  incompatibility error.
---
 .../backend/model_management/convert_ckpt_to_diffusers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 3547fcee7b..b46586611d 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -1264,10 +1264,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
             cache_dir=cache_dir,
         )
         pipe = pipeline_class(
-            vae=vae,
-            text_encoder=text_model,
+            vae=vae.to(precision),
+            text_encoder=text_model.to(precision),
             tokenizer=tokenizer,
-            unet=unet,
+            unet=unet.to(precision),
             scheduler=scheduler,
             safety_checker=None,
             feature_extractor=None,