From 9968ff2893c13b73f7ff031ea690f2344c8bab24 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:30:27 -0400 Subject: [PATCH 01/10] fix relative model paths to be against config.models_path, not root --- invokeai/app/services/config.py | 3 +- .../backend/install/invokeai_configure.py | 2 +- .../backend/install/model_install_backend.py | 8 +++-- .../convert_ckpt_to_diffusers.py | 6 ++-- .../backend/model_management/model_manager.py | 29 ++++++++++------- .../models/stable_diffusion.py | 2 +- invokeai/frontend/install/model_install.py | 31 ++++++------------- 7 files changed, 39 insertions(+), 42 deletions(-) diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 98855fe879..a3508e11ba 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -171,7 +171,6 @@ from pydantic import BaseSettings, Field, parse_obj_as from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args INIT_FILE = Path("invokeai.yaml") -MODEL_CORE = Path("models/core") DB_FILE = Path("invokeai.db") LEGACY_INIT_FILE = Path("invokeai.init") @@ -357,7 +356,7 @@ def _find_root() -> Path: venv = Path(os.environ.get("VIRTUAL_ENV") or ".") if os.environ.get("INVOKEAI_ROOT"): root = Path(os.environ.get("INVOKEAI_ROOT")).resolve() - elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE, MODEL_CORE]]): + elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]): root = (venv.parent).resolve() else: root = Path("~/invokeai").expanduser().resolve() diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 972e6668c4..4bf2a484a1 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th def download_conversion_models(): - target_dir = config.root_path / "models/core/convert" + target_dir = config.models_path / "core/convert" kwargs = dict() # for future use try: logger.info("Downloading core tokenizers and text encoders") diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index b3ab88b5dd..a96c5ff776 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -103,6 +103,7 @@ class ModelInstall(object): access_token: str = None, ): self.config = config + # force model manager to be a singleton self.mgr = model_manager or ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper @@ -273,6 +274,7 @@ class ModelInstall(object): logger.error(f"Unable to download {url}. 
Skipping.") info = ModelProbe().heuristic_probe(location) dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name + dest.parent.mkdir(parents=True, exist_ok=True) models_path = shutil.move(location, dest) # staged version will be garbage-collected at this time @@ -346,7 +348,7 @@ class ModelInstall(object): if key in self.datasets: description = self.datasets[key].get("description") or description - rel_path = self.relative_to_root(path) + rel_path = self.relative_to_root(path,self.config.models_path) attributes = dict( path=str(rel_path), @@ -386,8 +388,8 @@ class ModelInstall(object): attributes.update(dict(config=str(legacy_conf))) return attributes - def relative_to_root(self, path: Path) -> Path: - root = self.config.root_path + def relative_to_root(self, path: Path, root: None) -> Path: + root = root or self.config.root_path if path.is_relative_to(root): return path.relative_to(root) else: diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 2c62b8b192..1640270dbf 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from invokeai.backend.util.logging import InvokeAILogger -from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE +from invokeai.app.services.config import InvokeAIAppConfig from picklescan.scanner import scan_file_path from .models import BaseModelType, ModelVariantType @@ -81,7 +81,7 @@ if is_accelerate_available(): from accelerate.utils import set_module_tensor_to_device logger = InvokeAILogger.getLogger(__name__) -CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert" +CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert" def shave_segments(path, n_shave_prefix_segments=1): @@ -1281,7 +1281,7 @@ def download_from_original_stable_diffusion_ckpt( original_config = OmegaConf.load(original_config_file) if ( model_version == BaseModelType.StableDiffusion2 - and original_config["model"]["params"]["parameterization"] == "v" + and original_config["model"]["params"].get("parameterization") == "v" ): prediction_type = "v_prediction" upcast_attention = True diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 2a82061a97..d55ca55484 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -456,7 +456,7 @@ class ModelManager(object): raise ModelNotFoundException(f"Model not found - {model_key}") model_config = self.models[model_key] - model_path = self.app_config.root_path / model_config.path + model_path = self.app_config.models_path / model_config.path if not model_path.exists(): if model_class.save_to_config: @@ -623,7 +623,7 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.app_config.root_path / model_cfg.path + model_path = self.app_config.models_path / model_cfg.path cache_path = self._get_model_cache_path(model_path) if cache_path.exists(): rmtree(str(cache_path)) @@ -656,8 +656,8 @@ class ModelManager(object): """ # relativize paths as they go in - this 
makes it easier to move the root directory around if path := model_attributes.get("path"): - if Path(path).is_relative_to(self.app_config.root_path): - model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path)) + if Path(path).is_relative_to(self.app_config.models_path): + model_attributes["path"] = str(Path(path).relative_to(self.app_config.models_path)) model_class = MODEL_CLASSES[base_model][model_type] model_config = model_class.create_config(**model_attributes) @@ -732,7 +732,7 @@ class ModelManager(object): / new_name ) move(old_path, new_path) - model_cfg.path = str(new_path.relative_to(self.app_config.root_path)) + model_cfg.path = str(new_path.relative_to(self.app_config.models_path)) # clean up caches old_model_cache = self._get_model_cache_path(old_path) @@ -795,7 +795,7 @@ class ModelManager(object): info["path"] = ( str(new_diffusers_path) if dest_directory - else str(new_diffusers_path.relative_to(self.app_config.root_path)) + else str(new_diffusers_path.relative_to(self.app_config.models_path)) ) info.pop("config") @@ -883,10 +883,17 @@ class ModelManager(object): new_models_found = False self.logger.info(f"Scanning {self.app_config.models_path} for new models") - with Chdir(self.app_config.root_path): + with Chdir(self.app_config.models_path): for model_key, model_config in list(self.models.items()): model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - model_path = self.app_config.root_path.absolute() / model_config.path + + # Patch for relative path bug in older models.yaml - paths should not + # be starting with a hard-coded 'models'. This will also fix up + # models.yaml when committed. + if model_config.path.startswith('models'): + model_config.path = str(Path(*Path(model_config.path).parts[1:])) + + model_path = self.app_config.models_path.absolute() / model_config.path if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] if model_class.save_to_config: @@ -919,8 +926,8 @@ class ModelManager(object): if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - if model_path.is_relative_to(self.app_config.root_path): - model_path = model_path.relative_to(self.app_config.root_path) + if model_path.is_relative_to(self.app_config.models_path): + model_path = model_path.relative_to(self.app_config.models_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config @@ -971,7 +978,7 @@ class ModelManager(object): # LS: hacky # Patch in the SD VAE from core so that it is available for use by the UI try: - self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"}) + self.heuristic_import({config.models_path / "core/convert/sd-vae-ft-mse"}) except: pass diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index 76b4833f9c..e672067545 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -259,7 +259,7 @@ def _convert_ckpt_and_cache( """ app_config = InvokeAIAppConfig.get_config() - weights = app_config.root_path / model_config.path + weights = app_config.models_path / model_config.path config_file = app_config.root_path / model_config.config output_path = Path(output_path) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index ea9efe1908..d548ec7026 100644 --- 
a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -153,7 +153,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): BufferBox, name="Log Messages", editable=False, - max_height=8, + max_height=15, ) self.nextrely += 1 @@ -399,7 +399,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.ok_button.hidden = True self.display() - # for communication with the subprocess + # TO DO: Spawn a worker thread, not a subprocess parent_conn, child_conn = Pipe() p = Process( target=process_and_execute, @@ -414,7 +414,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.subprocess_connection = parent_conn self.subprocess = p app.install_selections = InstallSelections() - # process_and_execute(app.opt, app.install_selections) def on_back(self): self.parentApp.switchFormPrevious() @@ -489,8 +488,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # rebuild the form, saving and restoring some of the fields that need to be preserved. saved_messages = self.monitor.entry_widget.values - # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value) - # autoscan = self.pipeline_models['autoscan_on_startup'].value app.main_form = app.addForm( "MAIN", @@ -544,13 +541,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if downloads := section.get("download_ids"): selections.install_models.extend(downloads.value.split()) - # load directory and whether to scan on startup - # if self.parentApp.autoload_pending: - # selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value) - # self.parentApp.autoload_pending = False - # selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value - - class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, opt): super().__init__() @@ -635,10 +625,14 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre # -------------------------------------------------------- def process_and_execute( - opt: Namespace, - selections: InstallSelections, - conn_out: Connection = None, + opt: Namespace, + selections: InstallSelections, + conn_out: Connection = None, ): + # need to reinitialize config in subprocess + config = InvokeAIAppConfig.get_config() + config.parse_args() + # set up so that stderr is sent to conn_out if conn_out: translator = StderrToMessage(conn_out) @@ -647,7 +641,7 @@ def process_and_execute( logger = InvokeAILogger.getLogger() logger.handlers.clear() logger.addHandler(logging.StreamHandler(translator)) - + installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out)) installer.install(selections) @@ -685,9 +679,6 @@ def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) config.precision = precision helper = lambda x: ask_user_for_prediction_type(x) - # if do_listings(opt): - # pass - installer = ModelInstall(config, prediction_type_helper=helper) if opt.list_models: installer.list_models(opt.list_models) @@ -706,8 +697,6 @@ def select_and_download_models(opt: Namespace): # needed to support the probe() method running under a subprocess torch.multiprocessing.set_start_method("spawn") - # the third argument is needed in the Windows 11 environment in - # order to launch and resize a console window running this program set_min_terminal_size(MIN_COLS, MIN_LINES) installApp = AddModelApplication(opt) 
try: From d79d5a4ff72336babebf4d34f00368865d03c593 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:45:26 -0400 Subject: [PATCH 02/10] modest refactoring --- invokeai/backend/model_management/model_manager.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index d55ca55484..f5bbee18b4 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() + return self.resolve_model_path(".cache" + '/' + hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): @@ -456,7 +456,7 @@ class ModelManager(object): raise ModelNotFoundException(f"Model not found - {model_key}") model_config = self.models[model_key] - model_path = self.app_config.models_path / model_config.path + model_path = self.resolve_model_path(model_config.path) if not model_path.exists(): if model_class.save_to_config: @@ -623,7 +623,7 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.app_config.models_path / model_cfg.path + model_path = self.resolve_model_path(model_cfg.path) cache_path = self._get_model_cache_path(model_path) if cache_path.exists(): rmtree(str(cache_path)) @@ -782,7 +782,7 @@ class ModelManager(object): **submodel, ) checkpoint_path = self.app_config.root_path / info["path"] - old_diffusers_path = self.app_config.models_path / model.location + old_diffusers_path = self.resolve_model_path(model.location) new_diffusers_path = ( dest_directory or self.app_config.models_path / base_model.value / model_type.value ) / model_name @@ -810,6 +810,9 @@ class ModelManager(object): return result + def resolve_model_path(self, path: str) -> Path: + return self.app_config.models_path / path + def search_models(self, search_folder): self.logger.info(f"Finding Models In: {search_folder}") models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") From 982a568349ac43d8bef5c90b685d4661777fc477 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 10:47:55 -0400 Subject: [PATCH 03/10] blackify pr --- invokeai/app/invocations/params.py | 6 +++--- invokeai/backend/install/model_install_backend.py | 2 +- invokeai/backend/model_management/model_manager.py | 8 ++++---- invokeai/frontend/install/model_install.py | 9 +++++---- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/invokeai/app/invocations/params.py b/invokeai/app/invocations/params.py index 1a03baa7cc..513eb8762f 100644 --- a/invokeai/app/invocations/params.py +++ b/invokeai/app/invocations/params.py @@ -6,8 +6,7 @@ from pydantic import Field from invokeai.app.invocations.prompt import PromptOutput -from .baseinvocation import (BaseInvocation, BaseInvocationOutput, - InvocationConfig, InvocationContext) +from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext from .math import FloatOutput, IntOutput # Pass-through parameter nodes - used by subgraphs @@ -68,6 +67,7 @@ class ParamStringInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringOutput: return StringOutput(text=self.text) + class 
ParamPromptInvocation(BaseInvocation): """A prompt input parameter""" @@ -80,4 +80,4 @@ class ParamPromptInvocation(BaseInvocation): } def invoke(self, context: InvocationContext) -> PromptOutput: - return PromptOutput(prompt=self.prompt) \ No newline at end of file + return PromptOutput(prompt=self.prompt) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index a96c5ff776..67ca508fe1 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -348,7 +348,7 @@ class ModelInstall(object): if key in self.datasets: description = self.datasets[key].get("description") or description - rel_path = self.relative_to_root(path,self.config.models_path) + rel_path = self.relative_to_root(path, self.config.models_path) attributes = dict( path=str(rel_path), diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index f5bbee18b4..d377e86e31 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.resolve_model_path(".cache" + '/' + hashlib.md5(str(model_path).encode()).hexdigest()) + return self.resolve_model_path(".cache" + "/" + hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): @@ -889,13 +889,13 @@ class ModelManager(object): with Chdir(self.app_config.models_path): for model_key, model_config in list(self.models.items()): model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - + # Patch for relative path bug in older models.yaml - paths should not # be starting with a hard-coded 'models'. This will also fix up # models.yaml when committed. 
- if model_config.path.startswith('models'): + if model_config.path.startswith("models"): model_config.path = str(Path(*Path(model_config.path).parts[1:])) - + model_path = self.app_config.models_path.absolute() / model_config.path if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index d548ec7026..78423f6b78 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -541,6 +541,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if downloads := section.get("download_ids"): selections.install_models.extend(downloads.value.split()) + class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self, opt): super().__init__() @@ -625,9 +626,9 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre # -------------------------------------------------------- def process_and_execute( - opt: Namespace, - selections: InstallSelections, - conn_out: Connection = None, + opt: Namespace, + selections: InstallSelections, + conn_out: Connection = None, ): # need to reinitialize config in subprocess config = InvokeAIAppConfig.get_config() @@ -641,7 +642,7 @@ def process_and_execute( logger = InvokeAILogger.getLogger() logger.handlers.clear() logger.addHandler(logging.StreamHandler(translator)) - + installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out)) installer.install(selections) From 99daa979786d0889de5d8b7e3386ecdc946fb622 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:00:07 -0400 Subject: [PATCH 04/10] more refactoring; fixed place where rel conversion missed --- .../backend/install/model_install_backend.py | 14 +++++- .../backend/model_management/model_cache.py | 2 +- .../backend/model_management/model_manager.py | 46 ++++++++++--------- invokeai/frontend/install/model_install.py | 25 ---------- 4 files changed, 39 insertions(+), 48 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 67ca508fe1..f0dcb7585b 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -129,7 +129,7 @@ class ModelInstall(object): model_dict[key] = ModelLoadInfo(**value) # supplement with entries in models.yaml - installed_models = self.mgr.list_models() + installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] for md in installed_models: base = md["base_model"] @@ -148,6 +148,18 @@ class ModelInstall(object): ) return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())} + def _is_autoloaded(self, model_info: dict) -> bool: + path = model_info.get("path") + if not path: + return False + for autodir in ['autoimport_dir','lora_dir','embedding_dir','controlnet_dir']: + if autodir_path := getattr(self.config, autodir): + autodir_path = self.config.root_path / autodir_path + print(f'{path} => {autodir_path}; is_relative={Path(path).is_relative_to(autodir_path)}',file=log) + if Path(path).is_relative_to(autodir_path): + return True + return False + def list_models(self, model_type): installed = self.mgr.list_models(model_type=model_type) print(f"Installed models of type `{model_type}`:") diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 
a171c81e3c..a8c619b055 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,7 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model}:{model_type}:{submodel}") + self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}") # this will remove older cached models until # there is sufficient room to load the requested model diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index d377e86e31..a23902c407 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -586,7 +586,7 @@ class ModelManager(object): # expose paths as absolute to help web UI if path := model_dict.get("path"): - model_dict["path"] = str(self.app_config.root_path / path) + model_dict["path"] = str(self.resolve_model_path(path)) models.append(model_dict) return models @@ -654,10 +654,9 @@ class ModelManager(object): The returned dict has the same format as the dict returned by model_info(). """ - # relativize paths as they go in - this makes it easier to move the root directory around + # relativize paths as they go in - this makes it easier to move the models directory around if path := model_attributes.get("path"): - if Path(path).is_relative_to(self.app_config.models_path): - model_attributes["path"] = str(Path(path).relative_to(self.app_config.models_path)) + model_attributes["path"] = str(self.relative_model_path(Path(path))) model_class = MODEL_CLASSES[base_model][model_type] model_config = model_class.create_config(**model_attributes) @@ -715,7 +714,7 @@ class ModelManager(object): if not model_cfg: raise ModelNotFoundException(f"Unknown model: {model_key}") - old_path = self.app_config.root_path / model_cfg.path + old_path = self.resolve_model_path(model_cfg.path) new_name = new_name or model_name new_base = new_base or base_model new_key = self.create_key(new_name, new_base, model_type) @@ -725,11 +724,13 @@ class ModelManager(object): # if this is a model file/directory that we manage ourselves, we need to move it if old_path.is_relative_to(self.app_config.models_path): new_path = ( - self.app_config.root_path - / "models" - / BaseModelType(new_base).value - / ModelType(model_type).value - / new_name + self.resolve_model_path( + Path( + BaseModelType(new_base).value, + ModelType(model_type).value, + new_name, + ) + ) ) move(old_path, new_path) model_cfg.path = str(new_path.relative_to(self.app_config.models_path)) @@ -810,9 +811,15 @@ class ModelManager(object): return result - def resolve_model_path(self, path: str) -> Path: + def resolve_model_path(self, path: Union[Path,str]) -> Path: + """return relative paths based on configured models_path""" return self.app_config.models_path / path + def relative_model_path(self, model_path: Path) -> Path: + if model_path.is_relative_to(self.app_config.models_path): + model_path = model_path.relative_to(self.app_config.models_path) + return model_path + def search_models(self, search_folder): self.logger.info(f"Finding Models In: {search_folder}") models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") @@ -896,7 +903,7 @@ class ModelManager(object): if model_config.path.startswith("models"): model_config.path = str(Path(*Path(model_config.path).parts[1:])) - 
model_path = self.app_config.models_path.absolute() / model_config.path + model_path = self.resolve_model_path(model_config.path).absolute() if not model_path.exists(): model_class = MODEL_CLASSES[cur_base_model][cur_model_type] if model_class.save_to_config: @@ -915,7 +922,7 @@ class ModelManager(object): if model_type is not None and cur_model_type != model_type: continue model_class = MODEL_CLASSES[cur_base_model][cur_model_type] - models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value + models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value)) if not models_dir.exists(): continue # TODO: or create all folders? @@ -928,10 +935,8 @@ class ModelManager(object): try: if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - - if model_path.is_relative_to(self.app_config.models_path): - model_path = model_path.relative_to(self.app_config.models_path) - + + model_path = self.relative_model_path(model_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config new_models_found = True @@ -942,12 +947,11 @@ class ModelManager(object): except NotImplementedError as e: self.logger.warning(e) - imported_models = self.autoimport() - + imported_models = self.scan_autoimport_directory() if (new_models_found or imported_models) and self.config_path: self.commit() - def autoimport(self) -> Dict[str, AddModelResult]: + def scan_autoimport_directory(self) -> Dict[str, AddModelResult]: """ Scan the autoimport directory (if defined) and import new models, delete defunct models. """ @@ -981,7 +985,7 @@ class ModelManager(object): # LS: hacky # Patch in the SD VAE from core so that it is available for use by the UI try: - self.heuristic_import({config.models_path / "core/convert/sd-vae-ft-mse"}) + self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")}) except: pass diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 78423f6b78..97065bf2d8 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -650,31 +650,6 @@ def process_and_execute( conn_out.send_bytes("*done*".encode("utf-8")) conn_out.close() - -def do_listings(opt) -> bool: - """List installed models of various sorts, and return - True if any were requested.""" - model_manager = ModelManager(config.model_conf_path) - if opt.list_models == "diffusers": - print("Diffuser models:") - model_manager.print_models() - elif opt.list_models == "controlnets": - print("Installed Controlnet Models:") - cnm = model_manager.list_controlnet_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - elif opt.list_models == "loras": - print("Installed LoRA/LyCORIS Models:") - cnm = model_manager.list_lora_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - elif opt.list_models == "tis": - print("Installed Textual Inversion Embeddings:") - cnm = model_manager.list_ti_models() - print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" ")) - else: - return False - return True - - # -------------------------------------------------------- def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) From 0fb7328022b7fcbbc6a929456f5d4d7e9c6fd9b7 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:00:43 
-0400 Subject: [PATCH 05/10] blackify code --- .../backend/install/model_install_backend.py | 3 +-- .../backend/model_management/model_manager.py | 16 +++++++--------- invokeai/frontend/install/model_install.py | 1 + 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index f0dcb7585b..7a9acd4235 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -152,10 +152,9 @@ class ModelInstall(object): path = model_info.get("path") if not path: return False - for autodir in ['autoimport_dir','lora_dir','embedding_dir','controlnet_dir']: + for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]: if autodir_path := getattr(self.config, autodir): autodir_path = self.config.root_path / autodir_path - print(f'{path} => {autodir_path}; is_relative={Path(path).is_relative_to(autodir_path)}',file=log) if Path(path).is_relative_to(autodir_path): return True return False diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index a23902c407..81dc0e5d07 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -723,13 +723,11 @@ class ModelManager(object): # if this is a model file/directory that we manage ourselves, we need to move it if old_path.is_relative_to(self.app_config.models_path): - new_path = ( - self.resolve_model_path( - Path( - BaseModelType(new_base).value, - ModelType(model_type).value, - new_name, - ) + new_path = self.resolve_model_path( + Path( + BaseModelType(new_base).value, + ModelType(model_type).value, + new_name, ) ) move(old_path, new_path) @@ -811,7 +809,7 @@ class ModelManager(object): return result - def resolve_model_path(self, path: Union[Path,str]) -> Path: + def resolve_model_path(self, path: Union[Path, str]) -> Path: """return relative paths based on configured models_path""" return self.app_config.models_path / path @@ -935,7 +933,7 @@ class ModelManager(object): try: if model_key in self.models: raise DuplicateModelException(f"Model with key {model_key} added twice") - + model_path = self.relative_model_path(model_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 97065bf2d8..4272e05207 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -650,6 +650,7 @@ def process_and_execute( conn_out.send_bytes("*done*".encode("utf-8")) conn_out.close() + # -------------------------------------------------------- def select_and_download_models(opt: Namespace): precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) From 24b19166dd7e8921d92b4da1eb4de38d71b8e9bb Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 13:13:22 -0400 Subject: [PATCH 06/10] further refactoring --- invokeai/backend/model_management/model_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 81dc0e5d07..e381fef567 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -423,7 +423,7 @@ class 
ModelManager(object): return (model_name, base_model, model_type) def _get_model_cache_path(self, model_path): - return self.resolve_model_path(".cache" + "/" + hashlib.md5(str(model_path).encode()).hexdigest()) + return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest()) @classmethod def initialize_model_config(cls, config_path: Path): From 73f3b7f84bd9bc6866923993b2084839b5f48eaa Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 29 Jul 2023 17:32:33 -0400 Subject: [PATCH 07/10] remove dangling comment --- invokeai/backend/install/model_install_backend.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 7a9acd4235..7658b5b44f 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -103,7 +103,6 @@ class ModelInstall(object): access_token: str = None, ): self.config = config - # force model manager to be a singleton self.mgr = model_manager or ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper From 844578ab883b64691d02f343d79632ad62e50a3b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 07:57:10 -0400 Subject: [PATCH 08/10] fix lora loading crash --- invokeai/backend/model_management/model_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index a8c619b055..931b6c6b34 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,7 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? 
cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}") + self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}") # this will remove older cached models until # there is sufficient room to load the requested model From ac84a9f91578afd980947b5648d9965986ae6661 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 08:05:05 -0400 Subject: [PATCH 09/10] reenable display of autoloaded models --- invokeai/backend/install/model_install_backend.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 7658b5b44f..e6e400ca70 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -128,7 +128,9 @@ class ModelInstall(object): model_dict[key] = ModelLoadInfo(**value) # supplement with entries in models.yaml - installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] + installed_models = [x for x in self.mgr.list_models()] + # suppresses autoloaded models + # installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)] for md in installed_models: base = md["base_model"] From e20c4dc1e81709d021d4a7081bfe032f285a2c8e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 30 Jul 2023 08:17:10 -0400 Subject: [PATCH 10/10] blackified --- invokeai/backend/model_management/model_cache.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py index 931b6c6b34..4c18068bae 100644 --- a/invokeai/backend/model_management/model_cache.py +++ b/invokeai/backend/model_management/model_cache.py @@ -187,7 +187,9 @@ class ModelCache(object): # TODO: lock for no copies on simultaneous calls? cache_entry = self._cached_models.get(key, None) if cache_entry is None: - self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}") + self.logger.info( + f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}" + ) # this will remove older cached models until # there is sufficient room to load the requested model
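
Taken together, the series settles on a single convention: paths stored in models.yaml are relative to config.models_path rather than to the install root, and ModelManager funnels every conversion through the resolve_model_path() and relative_model_path() helpers added in PATCH 02 and PATCH 04. The sketch below illustrates that round-trip, including the legacy fix-up that strips the hard-coded leading "models/" segment from older models.yaml entries. It is a stand-alone illustration under stated assumptions: _StubConfig and the free functions are stand-ins for the example, not InvokeAI's actual InvokeAIAppConfig or ModelManager API.

from pathlib import Path


class _StubConfig:
    """Stand-in for InvokeAIAppConfig; only the two path attributes matter here."""

    def __init__(self, root: Path):
        self.root_path = root
        self.models_path = root / "models"


def resolve_model_path(config: _StubConfig, path: str) -> Path:
    # models.yaml entries are stored relative to models_path, so relocating
    # the models directory requires no rewriting of the yaml file.
    return config.models_path / path


def relative_model_path(config: _StubConfig, model_path: Path) -> Path:
    # Inverse operation: relativize paths as they are stored.
    if model_path.is_relative_to(config.models_path):
        return model_path.relative_to(config.models_path)
    return model_path


def fix_legacy_entry(path: str) -> str:
    # Older models.yaml entries hard-coded a leading "models/" segment
    # (they were root-relative); strip it so the remainder is relative to
    # models_path, mirroring the fix-up scan_models_directory() applies.
    if path.startswith("models"):
        return str(Path(*Path(path).parts[1:]))
    return path


if __name__ == "__main__":
    cfg = _StubConfig(Path("/opt/invokeai"))
    entry = fix_legacy_entry("models/sd-1/main/my-model")  # -> "sd-1/main/my-model"
    resolved = resolve_model_path(cfg, entry)  # /opt/invokeai/models/sd-1/main/my-model
    assert relative_model_path(cfg, resolved) == Path(entry)

One property worth noting: resolve_model_path() needs no absolute-path guard, because pathlib ignores the left operand when the right-hand segment is already absolute, so absolute entries in models.yaml pass through unchanged; that is why add_model() only has to relativize paths on the way in.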