From 9968ff2893c13b73f7ff031ea690f2344c8bab24 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:30:27 -0400 Subject: [PATCH 01/20] fix relative model paths to be against config.models_path, not root --- invokeai/app/services/config.py | 3 +- .../backend/install/invokeai_configure.py | 2 +- .../backend/install/model_install_backend.py | 8 +++-- .../convert_ckpt_to_diffusers.py | 6 ++-- .../backend/model_management/model_manager.py | 29 ++++++++++------- .../models/stable_diffusion.py | 2 +- invokeai/frontend/install/model_install.py | 31 ++++++------------- 7 files changed, 39 insertions(+), 42 deletions(-) diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 98855fe879..a3508e11ba 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -171,7 +171,6 @@ from pydantic import BaseSettings, Field, parse_obj_as from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args INIT_FILE = Path("invokeai.yaml") -MODEL_CORE = Path("models/core") DB_FILE = Path("invokeai.db") LEGACY_INIT_FILE = Path("invokeai.init") @@ -357,7 +356,7 @@ def _find_root() -> Path: venv = Path(os.environ.get("VIRTUAL_ENV") or ".") if os.environ.get("INVOKEAI_ROOT"): root = Path(os.environ.get("INVOKEAI_ROOT")).resolve() - elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE, MODEL_CORE]]): + elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]): root = (venv.parent).resolve() else: root = Path("~/invokeai").expanduser().resolve() diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 972e6668c4..4bf2a484a1 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th def download_conversion_models(): - target_dir = config.root_path / "models/core/convert" + target_dir = config.models_path / "core/convert" kwargs = dict() # for future use try: logger.info("Downloading core tokenizers and text encoders") diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index b3ab88b5dd..a96c5ff776 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -103,6 +103,7 @@ class ModelInstall(object): access_token: str = None, ): self.config = config + # force model manager to be a singleton self.mgr = model_manager or ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper @@ -273,6 +274,7 @@ class ModelInstall(object): logger.error(f"Unable to download {url}. 
Skipping.") info = ModelProbe().heuristic_probe(location) dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name + dest.parent.mkdir(parents=True, exist_ok=True) models_path = shutil.move(location, dest) # staged version will be garbage-collected at this time @@ -346,7 +348,7 @@ class ModelInstall(object): if key in self.datasets: description = self.datasets[key].get("description") or description - rel_path = self.relative_to_root(path) + rel_path = self.relative_to_root(path,self.config.models_path) attributes = dict( path=str(rel_path), @@ -386,8 +388,8 @@ class ModelInstall(object): attributes.update(dict(config=str(legacy_conf))) return attributes - def relative_to_root(self, path: Path) -> Path: - root = self.config.root_path + def relative_to_root(self, path: Path, root: None) -> Path: + root = root or self.config.root_path if path.is_relative_to(root): return path.relative_to(root) else: diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 2c62b8b192..1640270dbf 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from invokeai.backend.util.logging import InvokeAILogger -from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE +from invokeai.app.services.config import InvokeAIAppConfig from picklescan.scanner import scan_file_path from .models import BaseModelType, ModelVariantType @@ -81,7 +81,7 @@ if is_accelerate_available(): from accelerate.utils import set_module_tensor_to_device logger = InvokeAILogger.getLogger(__name__) -CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert" +CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert" def shave_segments(path, n_shave_prefix_segments=1): @@ -1281,7 +1281,7 @@ def download_from_original_stable_diffusion_ckpt( original_config = OmegaConf.load(original_config_file) if ( model_version == BaseModelType.StableDiffusion2 - and original_config["model"]["params"]["parameterization"] == "v" + and original_config["model"]["params"].get("parameterization") == "v" ): prediction_type = "v_prediction" upcast_attention = True diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 2a82061a97..d55ca55484 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -456,7 +456,7 @@ class ModelManager(object): raise ModelNotFoundException(f"Model not found - {model_key}") model_config = self.models[model_key] - model_path = self.app_config.root_path / model_config.path + model_path = self.app_config.models_path / model_config.path if not model_path.exists(): if model_class.save_to_config: @@ -623,7 +623,7 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.app_config.root_path / model_cfg.path + model_path = self.app_config.models_path / model_cfg.path cache_path = self._get_model_cache_path(model_path) if cache_path.exists(): rmtree(str(cache_path)) @@ -656,8 +656,8 @@ class ModelManager(object): """ # relativize paths as they go in - this 
makes it easier to move the root directory around if path := model_attributes.get("path"): - if Path(path).is_relative_to(self.app_config.root_path): - model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path)) + if Path(path).is_relative_to(self.app_config.models_path): + model_attributes["path"] = str(Path(path).relative_to(self.app_config.models_path)) model_class = MODEL_CLASSES[base_model][model_type] model_config = model_class.create_config(**model_attributes) @@ -732,7 +732,7 @@ class ModelManager(object): / new_name ) move(old_path, new_path) - model_cfg.path = str(new_path.relative_to(self.app_config.root_path)) + model_cfg.path = str(new_path.relative_to(self.app_config.models_path)) # clean up caches old_model_cache = self._get_model_cache_path(old_path) @@ -795,7 +795,7 @@ class ModelManager(object): info["path"] = ( str(new_diffusers_path) if dest_directory - else str(new_diffusers_path.relative_to(self.app_config.root_path)) + else str(new_diffusers_path.relative_to(self.app_config.models_path)) ) info.pop("config") @@ -883,10 +883,17 @@ class ModelManager(object): new_models_found = False self.logger.info(f"Scanning {self.app_config.models_path} for new models") - with Chdir(self.app_config.root_path): + with Chdir(self.app_config.models_path): for model_key, model_config in list(self.models.items()): model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - model_path = self.app_config.root_path.absolute() / model_config.path + + # Patch for relative path bug in older models.yaml - paths should not + # be starting with a hard-coded 'models'. This will also fix up + # models.yaml when committed. + if model_config.path.startswith('models'): + model_config.path = str(Path(*Path(model_config.path).parts[1:])) + + model_path = self.app_config.models_path.absolute() / model_config.path if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] if model_class.save_to_config: @@ -919,8 +926,8 @@ class ModelManager(object): if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - if model_path.is_relative_to(self.app_config.root_path): - model_path = model_path.relative_to(self.app_config.root_path) + if model_path.is_relative_to(self.app_config.models_path): + model_path = model_path.relative_to(self.app_config.models_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config @@ -971,7 +978,7 @@ class ModelManager(object): # LS: hacky # Patch in the SD VAE from core so that it is available for use by the UI try: - self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"}) + self.heuristic_import({config.models_path / "core/convert/sd-vae-ft-mse"}) except: pass diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index 76b4833f9c..e672067545 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -259,7 +259,7 @@ def _convert_ckpt_and_cache( """ app_config = InvokeAIAppConfig.get_config() - weights = app_config.root_path / model_config.path + weights = app_config.models_path / model_config.path config_file = app_config.root_path / model_config.config output_path = Path(output_path) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index ea9efe1908..d548ec7026 100644 --- 
a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -153,7 +153,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): BufferBox, name="Log Messages", editable=False, - max_height=8, + max_height=15, ) self.nextrely += 1 @@ -399,7 +399,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.ok_button.hidden = True self.display() - # for communication with the subprocess + # TO DO: Spawn a worker thread, not a subprocess parent_conn, child_conn = Pipe() p = Process( target=process_and_execute, @@ -414,7 +414,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.subprocess_connection = parent_conn self.subprocess = p app.install_selections = InstallSelections() - # process_and_execute(app.opt, app.install_selections) def on_back(self): self.parentApp.switchFormPrevious() @@ -489,8 +488,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # rebuild the form, saving and restoring some of the fields that need to be preserved. saved_messages = self.monitor.entry_widget.values - # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value) - # autoscan = self.pipeline_models['autoscan_on_startup'].value app.main_form = app.addForm( "MAIN", @@ -544,13 +541,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if downloads := section.get("download_ids"): selections.install_models.extend(downloads.value.split()) - # load directory and whether to scan on startup - # if self.parentApp.autoload_pending: - # selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value) - # self.parentApp.autoload_pending = False - # selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value - - class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, opt): super().__init__() @@ -635,10 +625,14 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre # -------------------------------------------------------- def process_and_execute( - opt: Namespace, - selections: InstallSelections, - conn_out: Connection = None, + opt: Namespace, + selections: InstallSelections, + conn_out: Connection = None, ): + # need to reinitialize config in subprocess + config = InvokeAIAppConfig.get_config() + config.parse_args() + # set up so that stderr is sent to conn_out if conn_out: translator = StderrToMessage(conn_out) @@ -647,7 +641,7 @@ def process_and_execute( logger = InvokeAILogger.getLogger() logger.handlers.clear() logger.addHandler(logging.StreamHandler(translator)) - + installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out)) installer.install(selections) @@ -685,9 +679,6 @@ def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) config.precision = precision helper = lambda x: ask_user_for_prediction_type(x) - # if do_listings(opt): - # pass - installer = ModelInstall(config, prediction_type_helper=helper) if opt.list_models: installer.list_models(opt.list_models) @@ -706,8 +697,6 @@ def select_and_download_models(opt: Namespace): # needed to support the probe() method running under a subprocess torch.multiprocessing.set_start_method("spawn") - # the third argument is needed in the Windows 11 environment in - # order to launch and resize a console window running this program set_min_terminal_size(MIN_COLS, MIN_LINES) installApp = AddModelApplication(opt) 
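# ---------------------------------------------------------------------------
# Reviewer sketch (an aside, not part of the patch): this patch's core idea
# is that a relative path stored in models.yaml now resolves against
# config.models_path instead of config.root_path. A minimal, runnable
# illustration of that resolution logic, assuming a hypothetical AppConfig
# stand-in for InvokeAIAppConfig; the directory names are invented.
from dataclasses import dataclass
from pathlib import Path
from typing import Union


@dataclass
class AppConfig:  # hypothetical stand-in for InvokeAIAppConfig
    root_path: Path

    @property
    def models_path(self) -> Path:
        # mirrors the convention that managed models live under <root>/models
        return self.root_path / "models"


def resolve_model_path(config: AppConfig, path: Union[str, Path]) -> Path:
    # relative paths are interpreted against models_path on the way out;
    # joining with an already-absolute path leaves it unchanged
    return config.models_path / path


def relative_model_path(config: AppConfig, model_path: Path) -> Path:
    # absolute paths under models_path are relativized on the way in, so
    # the models directory can be relocated without rewriting models.yaml
    if model_path.is_relative_to(config.models_path):
        model_path = model_path.relative_to(config.models_path)
    return model_path


cfg = AppConfig(root_path=Path("/home/user/invokeai"))
resolved = resolve_model_path(cfg, "sd-1/main/some-model")
assert resolved == cfg.models_path / "sd-1/main/some-model"
assert relative_model_path(cfg, resolved) == Path("sd-1/main/some-model")
# ---------------------------------------------------------------------------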
try: From d79d5a4ff72336babebf4d34f00368865d03c593 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:45:26 -0400 Subject: [PATCH 02/20] modest refactoring --- invokeai/backend/model_management/model_manager.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index d55ca55484..f5bbee18b4 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() + return self.resolve_model_path(".cache" + '/' + hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): @@ -456,7 +456,7 @@ class ModelManager(object): raise ModelNotFoundException(f"Model not found - {model_key}") model_config = self.models[model_key] - model_path = self.app_config.models_path / model_config.path + model_path = self.resolve_model_path(model_config.path) if not model_path.exists(): if model_class.save_to_config: @@ -623,7 +623,7 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.app_config.models_path / model_cfg.path + model_path = self.resolve_model_path(model_cfg.path) cache_path = self._get_model_cache_path(model_path) if cache_path.exists(): rmtree(str(cache_path)) @@ -782,7 +782,7 @@ class ModelManager(object): **submodel, ) checkpoint_path = self.app_config.root_path / info["path"] - old_diffusers_path = self.app_config.models_path / model.location + old_diffusers_path = self.resolve_model_path(model.location) new_diffusers_path = ( dest_directory or self.app_config.models_path / base_model.value / model_type.value ) / model_name @@ -810,6 +810,9 @@ class ModelManager(object): return result + def resolve_model_path(self, path: str) -> Path: + return self.app_config.models_path / path + def search_models(self, search_folder): self.logger.info(f"Finding Models In: {search_folder}") models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") From 982a568349ac43d8bef5c90b685d4661777fc477 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:47:55 -0400 Subject: [PATCH 03/20] blackify pr --- invokeai/app/invocations/params.py | 6 +++--- invokeai/backend/install/model_install_backend.py | 2 +- invokeai/backend/model_management/model_manager.py | 8 ++++---- invokeai/frontend/install/model_install.py | 9 +++++---- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/invokeai/app/invocations/params.py b/invokeai/app/invocations/params.py index 1a03baa7cc..513eb8762f 100644 --- a/invokeai/app/invocations/params.py +++ b/invokeai/app/invocations/params.py @@ -6,8 +6,7 @@ from pydantic import Field from invokeai.app.invocations.prompt import PromptOutput -from .baseinvocation import (BaseInvocation, BaseInvocationOutput, - InvocationConfig, InvocationContext) +from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext from .math import FloatOutput, IntOutput # Pass-through parameter nodes - used by subgraphs @@ -68,6 +67,7 @@ class ParamStringInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringOutput: return StringOutput(text=self.text) + class 
ParamPromptInvocation(BaseInvocation): """A prompt input parameter""" @@ -80,4 +80,4 @@ class ParamPromptInvocation(BaseInvocation): } def invoke(self, context: InvocationContext) -> PromptOutput: - return PromptOutput(prompt=self.prompt) \ No newline at end of file + return PromptOutput(prompt=self.prompt) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index a96c5ff776..67ca508fe1 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -348,7 +348,7 @@ class ModelInstall(object): if key in self.datasets: description = self.datasets[key].get("description") or description - rel_path = self.relative_to_root(path,self.config.models_path) + rel_path = self.relative_to_root(path, self.config.models_path) attributes = dict( path=str(rel_path), diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index f5bbee18b4..d377e86e31 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.resolve_model_path(".cache" + '/' + hashlib.md5(str(model_path).encode()).hexdigest()) + return self.resolve_model_path(".cache" + "/" + hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): @@ -889,13 +889,13 @@ class ModelManager(object): with Chdir(self.app_config.models_path): for model_key, model_config in list(self.models.items()): model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - + # Patch for relative path bug in older models.yaml - paths should not # be starting with a hard-coded 'models'. This will also fix up # models.yaml when committed. 
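# ---------------------------------------------------------------------------
# Reviewer sketch (an aside, not part of the patch): what the legacy-path
# fix above does. Older models.yaml entries were stored relative to the
# root and so began with a hard-coded "models" component; dropping that
# first component makes them relative to models_path. A hedged,
# self-contained version of the same normalization:
from pathlib import Path


def strip_legacy_models_prefix(path: str) -> str:
    parts = Path(path).parts
    # compare the first path component rather than a string prefix, so a
    # sibling directory such as "models-backup" is not accidentally matched
    if parts and parts[0] == "models":
        return str(Path(*parts[1:]))
    return path


assert strip_legacy_models_prefix("models/sd-1/main/foo") == str(Path("sd-1/main/foo"))
assert strip_legacy_models_prefix("sd-1/main/foo") == "sd-1/main/foo"
# Design note: the patch itself uses str.startswith("models"), which would
# also match a path like "models-backup/..."; checking the first component
# sidesteps that edge case.
# ---------------------------------------------------------------------------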
- if model_config.path.startswith('models'): + if model_config.path.startswith("models"): model_config.path = str(Path(*Path(model_config.path).parts[1:])) - + model_path = self.app_config.models_path.absolute() / model_config.path if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index d548ec7026..78423f6b78 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -541,6 +541,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if downloads := section.get("download_ids"): selections.install_models.extend(downloads.value.split()) + class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, opt): super().__init__() @@ -625,9 +626,9 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre # -------------------------------------------------------- def process_and_execute( - opt: Namespace, - selections: InstallSelections, - conn_out: Connection = None, + opt: Namespace, + selections: InstallSelections, + conn_out: Connection = None, ): # need to reinitialize config in subprocess config = InvokeAIAppConfig.get_config() @@ -641,7 +642,7 @@ def process_and_execute( logger = InvokeAILogger.getLogger() logger.handlers.clear() logger.addHandler(logging.StreamHandler(translator)) - + installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out)) installer.install(selections) From 99daa979786d0889de5d8b7e3386ecdc946fb622 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:00:07 -0400 Subject: [PATCH 04/20] more refactoring; fixed place where rel conversion missed --- .../backend/install/model_install_backend.py | 14 +++++- .../backend/model_management/model_cache.py | 2 +- .../backend/model_management/model_manager.py | 46 ++++++++++--------- invokeai/frontend/install/model_install.py | 25 ---------- 4 files changed, 39 insertions(+), 48 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 67ca508fe1..f0dcb7585b 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -129,7 +129,7 @@ class ModelInstall(object): model_dict[key] = ModelLoadInfo(**value) # supplement with entries in models.yaml - installed_models = self.mgr.list_models() + installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] for md in installed_models: base = md["base_model"] @@ -148,6 +148,18 @@ class ModelInstall(object): ) return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())} + def _is_autoloaded(self, model_info: dict) -> bool: + path = model_info.get("path") + if not path: + return False + for autodir in ['autoimport_dir','lora_dir','embedding_dir','controlnet_dir']: + if autodir_path := getattr(self.config, autodir): + autodir_path = self.config.root_path / autodir_path + print(f'{path} => {autodir_path}; is_relative={Path(path).is_relative_to(autodir_path)}',file=log) + if Path(path).is_relative_to(autodir_path): + return True + return False + def list_models(self, model_type): installed = self.mgr.list_models(model_type=model_type) print(f"Installed models of type `{model_type}`:") diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 
a171c81e3c..a8c619b055 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,7 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model}:{model_type}:{submodel}") + self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}") # this will remove older cached models until # there is sufficient room to load the requested model diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index d377e86e31..a23902c407 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -586,7 +586,7 @@ class ModelManager(object): # expose paths as absolute to help web UI if path := model_dict.get("path"): - model_dict["path"] = str(self.app_config.root_path / path) + model_dict["path"] = str(self.resolve_model_path(path)) models.append(model_dict) return models @@ -654,10 +654,9 @@ class ModelManager(object): The returned dict has the same format as the dict returned by model_info(). """ - # relativize paths as they go in - this makes it easier to move the root directory around + # relativize paths as they go in - this makes it easier to move the models directory around if path := model_attributes.get("path"): - if Path(path).is_relative_to(self.app_config.models_path): - model_attributes["path"] = str(Path(path).relative_to(self.app_config.models_path)) + model_attributes["path"] = str(self.relative_model_path(Path(path))) model_class = MODEL_CLASSES[base_model][model_type] model_config = model_class.create_config(**model_attributes) @@ -715,7 +714,7 @@ class ModelManager(object): if not model_cfg: raise ModelNotFoundException(f"Unknown model: {model_key}") - old_path = self.app_config.root_path / model_cfg.path + old_path = self.resolve_model_path(model_cfg.path) new_name = new_name or model_name new_base = new_base or base_model new_key = self.create_key(new_name, new_base, model_type) @@ -725,11 +724,13 @@ class ModelManager(object): # if this is a model file/directory that we manage ourselves, we need to move it if old_path.is_relative_to(self.app_config.models_path): new_path = ( - self.app_config.root_path - / "models" - / BaseModelType(new_base).value - / ModelType(model_type).value - / new_name + self.resolve_model_path( + Path( + BaseModelType(new_base).value, + ModelType(model_type).value, + new_name, + ) + ) ) move(old_path, new_path) model_cfg.path = str(new_path.relative_to(self.app_config.models_path)) @@ -810,9 +811,15 @@ class ModelManager(object): return result - def resolve_model_path(self, path: str) -> Path: + def resolve_model_path(self, path: Union[Path,str]) -> Path: + """return relative paths based on configured models_path""" return self.app_config.models_path / path + def relative_model_path(self, model_path: Path) -> Path: + if model_path.is_relative_to(self.app_config.models_path): + model_path = model_path.relative_to(self.app_config.models_path) + return model_path + def search_models(self, search_folder): self.logger.info(f"Finding Models In: {search_folder}") models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") @@ -896,7 +903,7 @@ class ModelManager(object): if model_config.path.startswith("models"): model_config.path = str(Path(*Path(model_config.path).parts[1:])) - 
model_path = self.app_config.models_path.absolute() / model_config.path + model_path = self.resolve_model_path(model_config.path).absolute() if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] if model_class.save_to_config: @@ -915,7 +922,7 @@ class ModelManager(object): if model_type is not None and cur_model_type != model_type: continue model_class = MODEL_CLASSES[cur_base_model][cur_model_type] - models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value + models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value)) if not models_dir.exists(): continue # TODO: or create all folders? @@ -928,10 +935,8 @@ class ModelManager(object): try: if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - - if model_path.is_relative_to(self.app_config.models_path): - model_path = model_path.relative_to(self.app_config.models_path) - + + model_path = self.relative_model_path(model_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config new_models_found = True @@ -942,12 +947,11 @@ class ModelManager(object): except NotImplementedError as e: self.logger.warning(e) - imported_models = self.autoimport() - + imported_models = self.scan_autoimport_directory() if (new_models_found or imported_models) and self.config_path: self.commit() - def autoimport(self) -> Dict[str, AddModelResult]: + def scan_autoimport_directory(self) -> Dict[str, AddModelResult]: """ Scan the autoimport directory (if defined) and import new models, delete defunct models. """ @@ -981,7 +985,7 @@ class ModelManager(object): # LS: hacky # Patch in the SD VAE from core so that it is available for use by the UI try: - self.heuristic_import({config.models_path / "core/convert/sd-vae-ft-mse"}) + self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")}) except: pass diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 78423f6b78..97065bf2d8 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -650,31 +650,6 @@ def process_and_execute( conn_out.send_bytes("*done*".encode("utf-8")) conn_out.close() - -def do_listings(opt) -> bool: - """List installed models of various sorts, and return - True if any were requested.""" - model_manager = ModelManager(config.model_conf_path) - if opt.list_models == "diffusers": - print("Diffuser models:") - model_manager.print_models() - elif opt.list_models == "controlnets": - print("Installed Controlnet Models:") - cnm = model_manager.list_controlnet_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - elif opt.list_models == "loras": - print("Installed LoRA/LyCORIS Models:") - cnm = model_manager.list_lora_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - elif opt.list_models == "tis": - print("Installed Textual Inversion Embeddings:") - cnm = model_manager.list_ti_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - else: - return False - return True - - # -------------------------------------------------------- def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) From 0fb7328022b7fcbbc6a929456f5d4d7e9c6fd9b7 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:00:43 
-0400 Subject: [PATCH 05/20] blackify code --- .../backend/install/model_install_backend.py | 3 +-- .../backend/model_management/model_manager.py | 16 +++++++--------- invokeai/frontend/install/model_install.py | 1 + 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index f0dcb7585b..7a9acd4235 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -152,10 +152,9 @@ class ModelInstall(object): path = model_info.get("path") if not path: return False - for autodir in ['autoimport_dir','lora_dir','embedding_dir','controlnet_dir']: + for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]: if autodir_path := getattr(self.config, autodir): autodir_path = self.config.root_path / autodir_path - print(f'{path} => {autodir_path}; is_relative={Path(path).is_relative_to(autodir_path)}',file=log) if Path(path).is_relative_to(autodir_path): return True return False diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index a23902c407..81dc0e5d07 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -723,13 +723,11 @@ class ModelManager(object): # if this is a model file/directory that we manage ourselves, we need to move it if old_path.is_relative_to(self.app_config.models_path): - new_path = ( - self.resolve_model_path( - Path( - BaseModelType(new_base).value, - ModelType(model_type).value, - new_name, - ) + new_path = self.resolve_model_path( + Path( + BaseModelType(new_base).value, + ModelType(model_type).value, + new_name, ) ) move(old_path, new_path) @@ -811,7 +809,7 @@ class ModelManager(object): return result - def resolve_model_path(self, path: Union[Path,str]) -> Path: + def resolve_model_path(self, path: Union[Path, str]) -> Path: """return relative paths based on configured models_path""" return self.app_config.models_path / path @@ -935,7 +933,7 @@ class ModelManager(object): try: if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - + model_path = self.relative_model_path(model_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 97065bf2d8..4272e05207 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -650,6 +650,7 @@ def process_and_execute( conn_out.send_bytes("*done*".encode("utf-8")) conn_out.close() + # -------------------------------------------------------- def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) From 24b19166dd7e8921d92b4da1eb4de38d71b8e9bb Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:13:22 -0400 Subject: [PATCH 06/20] further refactoring --- invokeai/backend/model_management/model_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 81dc0e5d07..e381fef567 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class 
ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.resolve_model_path(".cache" + "/" + hashlib.md5(str(model_path).encode()).hexdigest()) + return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): From 72c519c6ad21bfebae4e25e94655b233f6f47398 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:51:47 -0400 Subject: [PATCH 07/20] fix incorrect key construction --- invokeai/backend/model_management/convert_ckpt_to_diffusers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 2c62b8b192..5a3228658e 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -422,7 +422,7 @@ def convert_ldm_unet_checkpoint( ) for key in keys: if key.startswith("model.diffusion_model"): - flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) + flat_ema_key = "model_ema." + ".".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: From 2a2d988928e2606e24f56deebdc262e101029d32 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 15:28:39 -0400 Subject: [PATCH 08/20] convert script handles more ckpt variants --- .../convert_ckpt_to_diffusers.py | 45 ++++++++++++------- .../model_management/models/controlnet.py | 10 ++--- .../models/stable_diffusion.py | 1 + 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 5a3228658e..d6d61ee71d 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -422,8 +422,11 @@ def convert_ldm_unet_checkpoint( ) for key in keys: if key.startswith("model.diffusion_model"): - flat_ema_key = "model_ema." + ".".join(key.split(".")[1:]) - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + for delimiter in ['','.']: + flat_ema_key = "model_ema." + delimiter.join(key.split(".")[1:]) + if checkpoint.get(flat_ema_key) is not None: + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + break else: if sum(k.startswith("model_ema") for k in keys) > 100: logger.warning( @@ -1070,7 +1073,7 @@ def convert_controlnet_checkpoint( extract_ema, use_linear_projection=None, cross_attention_dim=None, - precision: torch.dtype = torch.float32, + precision: torch.dtype = None, ): ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) ctrlnet_config["upcast_attention"] = upcast_attention @@ -1121,7 +1124,7 @@ def download_from_original_stable_diffusion_ckpt( prediction_type: str = None, model_type: str = None, extract_ema: bool = False, - precision: torch.dtype = torch.float32, + precision: torch.dtype = None, scheduler_type: str = "pndm", num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, @@ -1194,6 +1197,8 @@ def download_from_original_stable_diffusion_ckpt( [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer) to use. 
If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if needed. + precision (`torch.dtype`, *optional*, defauts to `None`): + If not provided the precision will be set to the precision of the original file. return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file. """ @@ -1251,6 +1256,10 @@ def download_from_original_stable_diffusion_ckpt( checkpoint = checkpoint["state_dict"] logger.debug(f"model_type = {model_type}; original_config_file = {original_config_file}") + + precision_probing_key = "model.diffusion_model.input_blocks.0.0.bias" + logger.debug(f"original checkpoint precision == {checkpoint[precision_probing_key].dtype}") + precision = precision or checkpoint[precision_probing_key].dtype if original_config_file is None: key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" @@ -1281,7 +1290,7 @@ def download_from_original_stable_diffusion_ckpt( original_config = OmegaConf.load(original_config_file) if ( model_version == BaseModelType.StableDiffusion2 - and original_config["model"]["params"]["parameterization"] == "v" + and original_config["model"]["params"].get("parameterization") == "v" ): prediction_type = "v_prediction" upcast_attention = True @@ -1447,7 +1456,7 @@ def download_from_original_stable_diffusion_ckpt( if controlnet: pipe = pipeline_class( vae=vae.to(precision), - text_encoder=text_model, + text_encoder=text_model.to(precision), tokenizer=tokenizer, unet=unet.to(precision), scheduler=scheduler, @@ -1459,7 +1468,7 @@ def download_from_original_stable_diffusion_ckpt( else: pipe = pipeline_class( vae=vae.to(precision), - text_encoder=text_model, + text_encoder=text_model.to(precision), tokenizer=tokenizer, unet=unet.to(precision), scheduler=scheduler, @@ -1484,8 +1493,8 @@ def download_from_original_stable_diffusion_ckpt( image_noising_scheduler=image_noising_scheduler, # regular denoising components tokenizer=tokenizer, - text_encoder=text_model, - unet=unet, + text_encoder=text_model.to(precision), + unet=unet.to(precision), scheduler=scheduler, # vae vae=vae, @@ -1560,7 +1569,7 @@ def download_from_original_stable_diffusion_ckpt( if controlnet: pipe = pipeline_class( vae=vae.to(precision), - text_encoder=text_model, + text_encoder=text_model.to(precision), tokenizer=tokenizer, unet=unet.to(precision), controlnet=controlnet, @@ -1571,7 +1580,7 @@ def download_from_original_stable_diffusion_ckpt( else: pipe = pipeline_class( vae=vae.to(precision), - text_encoder=text_model, + text_encoder=text_model.to(precision), tokenizer=tokenizer, unet=unet.to(precision), scheduler=scheduler, @@ -1594,9 +1603,9 @@ def download_from_original_stable_diffusion_ckpt( pipe = StableDiffusionXLPipeline( vae=vae.to(precision), - text_encoder=text_encoder, + text_encoder=text_encoder.to(precision), tokenizer=tokenizer, - text_encoder_2=text_encoder_2, + text_encoder_2=text_encoder_2.to(precision), tokenizer_2=tokenizer_2, unet=unet.to(precision), scheduler=scheduler, @@ -1639,7 +1648,7 @@ def download_controlnet_from_original_ckpt( original_config_file: str, image_size: int = 512, extract_ema: bool = False, - precision: torch.dtype = torch.float32, + precision: torch.dtype = None, num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, device: str = None, @@ -1680,6 +1689,12 @@ def download_controlnet_from_original_ckpt( while "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] + # use original precision + precision_probing_key = 
'input_blocks.0.0.bias' + ckpt_precision = checkpoint[precision_probing_key].dtype + logger.debug(f'original controlnet precision = {ckpt_precision}') + precision = precision or ckpt_precision + original_config = OmegaConf.load(original_config_file) if num_in_channels is not None: @@ -1699,7 +1714,7 @@ def download_controlnet_from_original_ckpt( cross_attention_dim=cross_attention_dim, ) - return controlnet + return controlnet.to(precision) def convert_ldm_vae_to_diffusers(checkpoint, vae_config: DictConfig, image_size: int) -> AutoencoderKL: diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py index e075843a56..ed1e7316dc 100644 --- a/invokeai/backend/model_management/models/controlnet.py +++ b/invokeai/backend/model_management/models/controlnet.py @@ -17,7 +17,7 @@ from .base import ( ModelNotFoundException, ) from invokeai.app.services.config import InvokeAIAppConfig - +import invokeai.backend.util.logging as logger class ControlNetModelFormat(str, Enum): Checkpoint = "checkpoint" @@ -66,7 +66,7 @@ class ControlNetModel(ModelBase): child_type: Optional[SubModelType] = None, ): if child_type is not None: - raise Exception("There is no child models in controlnet model") + raise Exception("There are no child models in controlnet model") model = None for variant in ["fp16", None]: @@ -123,10 +123,7 @@ class ControlNetModel(ModelBase): else: return model_path - -@classmethod def _convert_controlnet_ckpt_and_cache( - cls, model_path: str, output_path: str, base_model: BaseModelType, @@ -140,7 +137,8 @@ def _convert_controlnet_ckpt_and_cache( app_config = InvokeAIAppConfig.get_config() weights = app_config.root_path / model_path output_path = Path(output_path) - + + logger.info(f"Converting {weights} to diffusers format") # return cached version if it exists if output_path.exists(): return output_path diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index 76b4833f9c..e4396a9582 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -123,6 +123,7 @@ class StableDiffusion1Model(DiffusersModel): return _convert_ckpt_and_cache( version=BaseModelType.StableDiffusion1, model_config=config, + load_safety_checker=False, output_path=output_path, ) else: From 781322a6473f0633b8f0bb56d2d5b49098d93c1f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 16:16:44 -0400 Subject: [PATCH 09/20] installer respects INVOKEAI_ROOT for default root location --- installer/lib/installer.py | 5 ++++- invokeai/frontend/install/model_install.py | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index e1ca8c2e8f..f15c83ba66 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -168,7 +168,10 @@ class Installer: messages.welcome() - self.dest = Path(root).expanduser().resolve() if yes_to_all else messages.dest_path(root) + invokeai_root = os.environ.get('INVOKEAI_ROOT') + default_path = invokeai_root or Path(root).expanduser().resolve() + + self.dest = default_path if yes_to_all else messages.dest_path(root) # create the venv for the app self.venv = self.app_venv() diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index ea9efe1908..78dd0f88d0 100644 --- a/invokeai/frontend/install/model_install.py +++ 
b/invokeai/frontend/install/model_install.py @@ -58,6 +58,8 @@ logger = InvokeAILogger.getLogger() # from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()} +# maximum number of installed models we can display before overflowing vertically +MAX_OTHER_MODELS = 72 def make_printable(s: str) -> str: """Replace non-printable characters in a string""" @@ -271,6 +273,11 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ) ) + truncation = False + if len(model_labels) > MAX_OTHER_MODELS: + model_labels = model_labels[0:MAX_OTHER_MODELS] + truncation = True + widgets.update( models_selected=self.add_widget_intelligent( MultiSelectColumns, @@ -289,6 +296,16 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): models=model_list, ) + if truncation: + widgets.update( + warning_message = self.add_widget_intelligent( + npyscreen.FixedText, + value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.", + editable=False, + color="CAUTION", + ) + ) + self.nextrely += 1 widgets.update( download_ids=self.add_widget_intelligent( From 3f9105be5005639f8e5ad7518d4f94f0ac37a3d8 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:17:45 -0400 Subject: [PATCH 10/20] make convert script respect setting of use_ema in config file --- .../model_management/convert_ckpt_to_diffusers.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index d6d61ee71d..b371fc96e8 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -422,11 +422,8 @@ def convert_ldm_unet_checkpoint( ) for key in keys: if key.startswith("model.diffusion_model"): - for delimiter in ['','.']: - flat_ema_key = "model_ema." + delimiter.join(key.split(".")[1:]) - if checkpoint.get(flat_ema_key) is not None: - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) - break + flat_ema_key = "model_ema." 
+ "".join(key.split(".")[2:]) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: logger.warning( @@ -1114,7 +1111,6 @@ def convert_controlnet_checkpoint( return controlnet.to(precision) -# TO DO - PASS PRECISION def download_from_original_stable_diffusion_ckpt( checkpoint_path: str, model_version: BaseModelType, @@ -1288,6 +1284,9 @@ def download_from_original_stable_diffusion_ckpt( original_config_file = BytesIO(requests.get(config_url).content) original_config = OmegaConf.load(original_config_file) + if original_config['model']['params'].get('use_ema') is not None: + extract_ema = original_config['model']['params']['use_ema'] + if ( model_version == BaseModelType.StableDiffusion2 and original_config["model"]["params"].get("parameterization") == "v" From 1de783b1cede6cac409e3599f9204a067b322c2d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:20:26 -0400 Subject: [PATCH 11/20] fix mistake in indexing flat_ema_key --- invokeai/backend/model_management/convert_ckpt_to_diffusers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index b371fc96e8..4a95d1d980 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -422,7 +422,7 @@ def convert_ldm_unet_checkpoint( ) for key in keys: if key.startswith("model.diffusion_model"): - flat_ema_key = "model_ema." + "".join(key.split(".")[2:]) + flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: From e82eb0b9fc6a61d52087d4d327a05b476dfe57e5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:30:21 -0400 Subject: [PATCH 12/20] add correct optional annotation to precision arg --- .../backend/model_management/convert_ckpt_to_diffusers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 4a95d1d980..8565c1aef7 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -1070,7 +1070,7 @@ def convert_controlnet_checkpoint( extract_ema, use_linear_projection=None, cross_attention_dim=None, - precision: torch.dtype = None, + precision: Optional[torch.dtype] = None, ): ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) ctrlnet_config["upcast_attention"] = upcast_attention @@ -1120,7 +1120,7 @@ def download_from_original_stable_diffusion_ckpt( prediction_type: str = None, model_type: str = None, extract_ema: bool = False, - precision: torch.dtype = None, + precision: Optional[torch.dtype] = None, scheduler_type: str = "pndm", num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, @@ -1647,7 +1647,7 @@ def download_controlnet_from_original_ckpt( original_config_file: str, image_size: int = 512, extract_ema: bool = False, - precision: torch.dtype = None, + precision: Optional[torch.dtype] = None, num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, device: str = None, From 348bee89817df4e59c058e349f63e46a71bcc92d Mon Sep 17 
00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:30:54 -0400 Subject: [PATCH 13/20] blackified --- invokeai/app/invocations/params.py | 6 +++--- .../model_management/convert_ckpt_to_diffusers.py | 12 ++++++------ .../backend/model_management/models/controlnet.py | 4 +++- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/invokeai/app/invocations/params.py b/invokeai/app/invocations/params.py index 1a03baa7cc..513eb8762f 100644 --- a/invokeai/app/invocations/params.py +++ b/invokeai/app/invocations/params.py @@ -6,8 +6,7 @@ from pydantic import Field from invokeai.app.invocations.prompt import PromptOutput -from .baseinvocation import (BaseInvocation, BaseInvocationOutput, - InvocationConfig, InvocationContext) +from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext from .math import FloatOutput, IntOutput # Pass-through parameter nodes - used by subgraphs @@ -68,6 +67,7 @@ class ParamStringInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringOutput: return StringOutput(text=self.text) + class ParamPromptInvocation(BaseInvocation): """A prompt input parameter""" @@ -80,4 +80,4 @@ class ParamPromptInvocation(BaseInvocation): } def invoke(self, context: InvocationContext) -> PromptOutput: - return PromptOutput(prompt=self.prompt) \ No newline at end of file + return PromptOutput(prompt=self.prompt) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 8565c1aef7..3893081b39 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -1252,7 +1252,7 @@ def download_from_original_stable_diffusion_ckpt( checkpoint = checkpoint["state_dict"] logger.debug(f"model_type = {model_type}; original_config_file = {original_config_file}") - + precision_probing_key = "model.diffusion_model.input_blocks.0.0.bias" logger.debug(f"original checkpoint precision == {checkpoint[precision_probing_key].dtype}") precision = precision or checkpoint[precision_probing_key].dtype @@ -1284,9 +1284,9 @@ def download_from_original_stable_diffusion_ckpt( original_config_file = BytesIO(requests.get(config_url).content) original_config = OmegaConf.load(original_config_file) - if original_config['model']['params'].get('use_ema') is not None: - extract_ema = original_config['model']['params']['use_ema'] - + if original_config["model"]["params"].get("use_ema") is not None: + extract_ema = original_config["model"]["params"]["use_ema"] + if ( model_version == BaseModelType.StableDiffusion2 and original_config["model"]["params"].get("parameterization") == "v" @@ -1689,9 +1689,9 @@ def download_controlnet_from_original_ckpt( checkpoint = checkpoint["state_dict"] # use original precision - precision_probing_key = 'input_blocks.0.0.bias' + precision_probing_key = "input_blocks.0.0.bias" ckpt_precision = checkpoint[precision_probing_key].dtype - logger.debug(f'original controlnet precision = {ckpt_precision}') + logger.debug(f"original controlnet precision = {ckpt_precision}") precision = precision or ckpt_precision original_config = OmegaConf.load(original_config_file) diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py index ed1e7316dc..061be7ae49 100644 --- a/invokeai/backend/model_management/models/controlnet.py +++ b/invokeai/backend/model_management/models/controlnet.py @@ -19,6 
+19,7 @@ from .base import ( from invokeai.app.services.config import InvokeAIAppConfig import invokeai.backend.util.logging as logger + class ControlNetModelFormat(str, Enum): Checkpoint = "checkpoint" Diffusers = "diffusers" @@ -123,6 +124,7 @@ class ControlNetModel(ModelBase): else: return model_path + def _convert_controlnet_ckpt_and_cache( model_path: str, output_path: str, @@ -137,7 +139,7 @@ def _convert_controlnet_ckpt_and_cache( app_config = InvokeAIAppConfig.get_config() weights = app_config.root_path / model_path output_path = Path(output_path) - + logger.info(f"Converting {weights} to diffusers format") # return cached version if it exists if output_path.exists(): From 73f3b7f84bd9bc6866923993b2084839b5f48eaa Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:32:33 -0400 Subject: [PATCH 14/20] remove dangling comment --- invokeai/backend/install/model_install_backend.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 7a9acd4235..7658b5b44f 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -103,7 +103,6 @@ class ModelInstall(object): access_token: str = None, ): self.config = config - # force model manager to be a singleton self.mgr = model_manager or ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper From c2eb50d1cdb85059a289fc2a712361cb9a5b438a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 19:19:42 -0400 Subject: [PATCH 15/20] make installer use initial INVOKEAI_ROOT as default install location --- installer/lib/installer.py | 6 ++---- installer/lib/main.py | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index f15c83ba66..40cfcedeb2 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -149,7 +149,7 @@ class Installer: return venv_dir def install( - self, root: str = "~/invokeai-3", version: str = "latest", yes_to_all=False, find_links: Path = None + self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None ) -> None: """ Install the InvokeAI application into the given runtime path @@ -168,9 +168,7 @@ class Installer: messages.welcome() - invokeai_root = os.environ.get('INVOKEAI_ROOT') - default_path = invokeai_root or Path(root).expanduser().resolve() - + default_path = os.environ.get('INVOKEAI_ROOT') or Path(root).expanduser().resolve() self.dest = default_path if yes_to_all else messages.dest_path(root) # create the venv for the app diff --git a/installer/lib/main.py b/installer/lib/main.py index b442f49255..4b74f984dc 100644 --- a/installer/lib/main.py +++ b/installer/lib/main.py @@ -3,6 +3,7 @@ InvokeAI Installer """ import argparse +import os from pathlib import Path from installer import Installer @@ -15,7 +16,7 @@ if __name__ == "__main__": dest="root", type=str, help="Destination path for installation", - default="~/invokeai", + default=os.environ.get('INVOKEAI_ROOT') or "~/invokeai", ) parser.add_argument( "-y", From b10b07220ec4253cc7e705a63154da268bcd3a0f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 19:20:20 -0400 Subject: [PATCH 16/20] blackify code --- installer/lib/installer.py | 2 +- installer/lib/main.py | 2 +- invokeai/frontend/install/model_install.py | 3 ++- 3 files changed, 4 insertions(+), 3 
deletions(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index 40cfcedeb2..e0a72b34aa 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -168,7 +168,7 @@ class Installer: messages.welcome() - default_path = os.environ.get('INVOKEAI_ROOT') or Path(root).expanduser().resolve() + default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve() self.dest = default_path if yes_to_all else messages.dest_path(root) # create the venv for the app diff --git a/installer/lib/main.py b/installer/lib/main.py index 4b74f984dc..4f1372652b 100644 --- a/installer/lib/main.py +++ b/installer/lib/main.py @@ -16,7 +16,7 @@ if __name__ == "__main__": dest="root", type=str, help="Destination path for installation", - default=os.environ.get('INVOKEAI_ROOT') or "~/invokeai", + default=os.environ.get("INVOKEAI_ROOT") or "~/invokeai", ) parser.add_argument( "-y", diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 78dd0f88d0..cdb1d165fd 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -61,6 +61,7 @@ NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr( # maximum number of installed models we can display before overflowing vertically MAX_OTHER_MODELS = 72 + def make_printable(s: str) -> str: """Replace non-printable characters in a string""" return s.translate(NOPRINT_TRANS_TABLE) @@ -298,7 +299,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if truncation: widgets.update( - warning_message = self.add_widget_intelligent( + warning_message=self.add_widget_intelligent( npyscreen.FixedText, value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.", editable=False, From 43b1eb8e8459db0360ed2dcc8bff859035f3d470 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 19:49:58 -0400 Subject: [PATCH 17/20] wording changes --- invokeai/frontend/install/invokeai_update.py | 2 +- invokeai/frontend/install/model_install.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index 56d1a313c7..3fe6ff1574 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -112,7 +112,7 @@ def main(): extras = get_extras() - print(f":crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]") + print(f":crossed_fingers: Upgrading to [yellow]{tag or release or branch}[/yellow]") if release: cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip" --use-pep517 --upgrade' elif tag: diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index cdb1d165fd..7616fa8c81 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -105,7 +105,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): SingleSelectColumns, values=[ "STARTER MODELS", - "MORE MODELS", + "MAIN MODELS", "CONTROLNETS", "LORA/LYCORIS", "TEXTUAL INVERSION", @@ -331,7 +331,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): widgets = self.add_model_widgets( model_type=model_type, window_width=window_width, - install_prompt=f"Additional {model_type.value.title()} models already installed.", + install_prompt=f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. 
Enter URLs, paths or repo_ids to import.", **kwargs, ) From 844578ab883b64691d02f343d79632ad62e50a3b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 07:57:10 -0400 Subject: [PATCH 18/20] fix lora loading crash --- invokeai/backend/model_management/model_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index a8c619b055..931b6c6b34 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,7 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}") + self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}") # this will remove older cached models until # there is sufficient room to load the requested model From ac84a9f91578afd980947b5648d9965986ae6661 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 08:05:05 -0400 Subject: [PATCH 19/20] reenable display of autoloaded models --- invokeai/backend/install/model_install_backend.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 7658b5b44f..e6e400ca70 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -128,7 +128,9 @@ class ModelInstall(object): model_dict[key] = ModelLoadInfo(**value) # supplement with entries in models.yaml - installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] + installed_models = [x for x in self.mgr.list_models()] + # suppresses autoloaded models + # installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] for md in installed_models: base = md["base_model"] From e20c4dc1e81709d021d4a7081bfe032f285a2c8e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 08:17:10 -0400 Subject: [PATCH 20/20] blackified --- invokeai/backend/model_management/model_cache.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 931b6c6b34..4c18068bae 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,9 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}") + self.logger.info( + f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}" + ) # this will remove older cached models until # there is sufficient room to load the requested model
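
Reviewer sketch: patches 08 and 13 introduce precision probing — rather than defaulting conversions to float32, the original checkpoint's dtype is read from a known tensor key and used as the fallback when no precision is requested. The sketch below distills that pattern; it is a minimal illustration under stated assumptions, not InvokeAI's implementation. The probing keys mirror the ones in the patches, and the one-entry state dict is fabricated for the example.

from typing import Optional

import torch

# keys the patches use to sample a representative tensor's dtype
SD_PROBE_KEY = "model.diffusion_model.input_blocks.0.0.bias"
CONTROLNET_PROBE_KEY = "input_blocks.0.0.bias"


def probe_precision(checkpoint: dict, key: str) -> torch.dtype:
    # the dtype of one representative weight stands in for the whole file
    return checkpoint[key].dtype


def choose_precision(
    checkpoint: dict, key: str, requested: Optional[torch.dtype] = None
) -> torch.dtype:
    # an explicitly requested precision wins; otherwise keep the original's
    return requested or probe_precision(checkpoint, key)


fake_ckpt = {SD_PROBE_KEY: torch.zeros(4, dtype=torch.float16)}
assert choose_precision(fake_ckpt, SD_PROBE_KEY) == torch.float16
assert choose_precision(fake_ckpt, SD_PROBE_KEY, requested=torch.float32) == torch.float32

Keeping the checkpoint's native precision as the default means an fp16 file converts to an fp16 diffusers pipeline unless the caller overrides it, which is why the same patches also move `text_encoder` and `unet` onto the probed precision via `.to(precision)`.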