From 9968ff2893c13b73f7ff031ea690f2344c8bab24 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sat, 29 Jul 2023 10:30:27 -0400
Subject: [PATCH] fix relative model paths to be against config.models_path, not root

---
 invokeai/app/services/config.py               |  3 +-
 .../backend/install/invokeai_configure.py     |  2 +-
 .../backend/install/model_install_backend.py  |  8 +++--
 .../convert_ckpt_to_diffusers.py              |  6 ++--
 .../backend/model_management/model_manager.py | 29 ++++++++++-------
 .../models/stable_diffusion.py                |  2 +-
 invokeai/frontend/install/model_install.py    | 31 ++++++-------------
 7 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index 98855fe879..a3508e11ba 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -171,7 +171,6 @@
 from pydantic import BaseSettings, Field, parse_obj_as
 from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
 
 INIT_FILE = Path("invokeai.yaml")
-MODEL_CORE = Path("models/core")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
@@ -357,7 +356,7 @@ def _find_root() -> Path:
     venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
     if os.environ.get("INVOKEAI_ROOT"):
         root = Path(os.environ.get("INVOKEAI_ROOT")).resolve()
-    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE, MODEL_CORE]]):
+    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
         root = (venv.parent).resolve()
     else:
         root = Path("~/invokeai").expanduser().resolve()
diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index 972e6668c4..4bf2a484a1 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
 
 
 def download_conversion_models():
-    target_dir = config.root_path / "models/core/convert"
+    target_dir = config.models_path / "core/convert"
     kwargs = dict()  # for future use
     try:
         logger.info("Downloading core tokenizers and text encoders")
diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index b3ab88b5dd..a96c5ff776 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -103,6 +103,7 @@ class ModelInstall(object):
         access_token: str = None,
     ):
         self.config = config
+        # force model manager to be a singleton
         self.mgr = model_manager or ModelManager(config.model_conf_path)
         self.datasets = OmegaConf.load(Dataset_path)
         self.prediction_helper = prediction_type_helper
@@ -273,6 +274,7 @@ class ModelInstall(object):
             logger.error(f"Unable to download {url}. Skipping.")
Skipping.") info = ModelProbe().heuristic_probe(location) dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name + dest.parent.mkdir(parents=True, exist_ok=True) models_path = shutil.move(location, dest) # staged version will be garbage-collected at this time @@ -346,7 +348,7 @@ class ModelInstall(object): if key in self.datasets: description = self.datasets[key].get("description") or description - rel_path = self.relative_to_root(path) + rel_path = self.relative_to_root(path,self.config.models_path) attributes = dict( path=str(rel_path), @@ -386,8 +388,8 @@ class ModelInstall(object): attributes.update(dict(config=str(legacy_conf))) return attributes - def relative_to_root(self, path: Path) -> Path: - root = self.config.root_path + def relative_to_root(self, path: Path, root: None) -> Path: + root = root or self.config.root_path if path.is_relative_to(root): return path.relative_to(root) else: diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 2c62b8b192..1640270dbf 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from invokeai.backend.util.logging import InvokeAILogger -from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE +from invokeai.app.services.config import InvokeAIAppConfig from picklescan.scanner import scan_file_path from .models import BaseModelType, ModelVariantType @@ -81,7 +81,7 @@ if is_accelerate_available(): from accelerate.utils import set_module_tensor_to_device logger = InvokeAILogger.getLogger(__name__) -CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert" +CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert" def shave_segments(path, n_shave_prefix_segments=1): @@ -1281,7 +1281,7 @@ def download_from_original_stable_diffusion_ckpt( original_config = OmegaConf.load(original_config_file) if ( model_version == BaseModelType.StableDiffusion2 - and original_config["model"]["params"]["parameterization"] == "v" + and original_config["model"]["params"].get("parameterization") == "v" ): prediction_type = "v_prediction" upcast_attention = True diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 2a82061a97..d55ca55484 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -456,7 +456,7 @@ class ModelManager(object): raise ModelNotFoundException(f"Model not found - {model_key}") model_config = self.models[model_key] - model_path = self.app_config.root_path / model_config.path + model_path = self.app_config.models_path / model_config.path if not model_path.exists(): if model_class.save_to_config: @@ -623,7 +623,7 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.app_config.root_path / model_cfg.path + model_path = self.app_config.models_path / model_cfg.path cache_path = self._get_model_cache_path(model_path) if cache_path.exists(): rmtree(str(cache_path)) @@ -656,8 +656,8 @@ class ModelManager(object): """ # relativize paths as they go in - this 
         if path := model_attributes.get("path"):
-            if Path(path).is_relative_to(self.app_config.root_path):
-                model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path))
+            if Path(path).is_relative_to(self.app_config.models_path):
+                model_attributes["path"] = str(Path(path).relative_to(self.app_config.models_path))
 
         model_class = MODEL_CLASSES[base_model][model_type]
         model_config = model_class.create_config(**model_attributes)
@@ -732,7 +732,7 @@ class ModelManager(object):
             / new_name
         )
         move(old_path, new_path)
-        model_cfg.path = str(new_path.relative_to(self.app_config.root_path))
+        model_cfg.path = str(new_path.relative_to(self.app_config.models_path))
 
         # clean up caches
         old_model_cache = self._get_model_cache_path(old_path)
@@ -795,7 +795,7 @@ class ModelManager(object):
         info["path"] = (
             str(new_diffusers_path)
             if dest_directory
-            else str(new_diffusers_path.relative_to(self.app_config.root_path))
+            else str(new_diffusers_path.relative_to(self.app_config.models_path))
         )
 
         info.pop("config")
@@ -883,10 +883,17 @@ class ModelManager(object):
         new_models_found = False
 
         self.logger.info(f"Scanning {self.app_config.models_path} for new models")
-        with Chdir(self.app_config.root_path):
+        with Chdir(self.app_config.models_path):
             for model_key, model_config in list(self.models.items()):
                 model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
-                model_path = self.app_config.root_path.absolute() / model_config.path
+
+                # Patch for relative path bug in older models.yaml - paths should not
+                # be starting with a hard-coded 'models'. This will also fix up
+                # models.yaml when committed.
+                if model_config.path.startswith('models'):
+                    model_config.path = str(Path(*Path(model_config.path).parts[1:]))
+
+                model_path = self.app_config.models_path.absolute() / model_config.path
                 if not model_path.exists():
                     model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
                     if model_class.save_to_config:
@@ -919,8 +926,8 @@ class ModelManager(object):
                         if model_key in self.models:
                             raise DuplicateModelException(f"Model with key {model_key} added twice")
 
-                        if model_path.is_relative_to(self.app_config.root_path):
-                            model_path = model_path.relative_to(self.app_config.root_path)
+                        if model_path.is_relative_to(self.app_config.models_path):
+                            model_path = model_path.relative_to(self.app_config.models_path)
 
                         model_config: ModelConfigBase = model_class.probe_config(str(model_path))
                         self.models[model_key] = model_config
@@ -971,7 +978,7 @@ class ModelManager(object):
 
         # LS: hacky
         # Patch in the SD VAE from core so that it is available for use by the UI
         try:
-            self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"})
+            self.heuristic_import({config.models_path / "core/convert/sd-vae-ft-mse"})
         except:
             pass
diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py
index 76b4833f9c..e672067545 100644
--- a/invokeai/backend/model_management/models/stable_diffusion.py
+++ b/invokeai/backend/model_management/models/stable_diffusion.py
@@ -259,7 +259,7 @@ def _convert_ckpt_and_cache(
     """
     app_config = InvokeAIAppConfig.get_config()
 
-    weights = app_config.root_path / model_config.path
+    weights = app_config.models_path / model_config.path
     config_file = app_config.root_path / model_config.config
     output_path = Path(output_path)
 
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index ea9efe1908..d548ec7026 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -153,7 +153,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             BufferBox,
             name="Log Messages",
             editable=False,
-            max_height=8,
+            max_height=15,
         )
 
         self.nextrely += 1
@@ -399,7 +399,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
            self.ok_button.hidden = True
            self.display()
 
-            # for communication with the subprocess
+            # TO DO: Spawn a worker thread, not a subprocess
            parent_conn, child_conn = Pipe()
            p = Process(
                target=process_and_execute,
@@ -414,7 +414,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
            self.subprocess_connection = parent_conn
            self.subprocess = p
            app.install_selections = InstallSelections()
-            # process_and_execute(app.opt, app.install_selections)
 
     def on_back(self):
         self.parentApp.switchFormPrevious()
@@ -489,8 +488,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
 
         # rebuild the form, saving and restoring some of the fields that need to be preserved.
         saved_messages = self.monitor.entry_widget.values
-        # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        # autoscan = self.pipeline_models['autoscan_on_startup'].value
 
         app.main_form = app.addForm(
             "MAIN",
@@ -544,13 +541,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             if downloads := section.get("download_ids"):
                 selections.install_models.extend(downloads.value.split())
 
-        # load directory and whether to scan on startup
-        # if self.parentApp.autoload_pending:
-        #     selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        #     self.parentApp.autoload_pending = False
-        #     selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value
-
-
 class AddModelApplication(npyscreen.NPSAppManaged):
     def __init__(self, opt):
         super().__init__()
@@ -635,10 +625,14 @@ def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPre
 
 # --------------------------------------------------------
 def process_and_execute(
-    opt: Namespace,
-    selections: InstallSelections,
-    conn_out: Connection = None,
+    opt: Namespace,
+    selections: InstallSelections,
+    conn_out: Connection = None,
 ):
+    # need to reinitialize config in subprocess
+    config = InvokeAIAppConfig.get_config()
+    config.parse_args()
+
     # set up so that stderr is sent to conn_out
     if conn_out:
         translator = StderrToMessage(conn_out)
@@ -647,7 +641,7 @@ def process_and_execute(
         logger = InvokeAILogger.getLogger()
         logger.handlers.clear()
         logger.addHandler(logging.StreamHandler(translator))
-
+
     installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out))
     installer.install(selections)
 
@@ -685,9 +679,6 @@ def select_and_download_models(opt: Namespace):
     precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
     config.precision = precision
     helper = lambda x: ask_user_for_prediction_type(x)
-    # if do_listings(opt):
-    #     pass
-
     installer = ModelInstall(config, prediction_type_helper=helper)
     if opt.list_models:
         installer.list_models(opt.list_models)
@@ -706,8 +697,6 @@ def select_and_download_models(opt: Namespace):
         # needed to support the probe() method running under a subprocess
         torch.multiprocessing.set_start_method("spawn")
 
-        # the third argument is needed in the Windows 11 environment in
-        # order to launch and resize a console window running this program
         set_min_terminal_size(MIN_COLS, MIN_LINES)
         installApp = AddModelApplication(opt)
         try:
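
A minimal sketch of the convention the patch establishes, for reference: model
paths are written to models.yaml relative to config.models_path and resolved
against models_path on load, mirroring the patched ModelInstall.relative_to_root()
helper and the ModelManager lookups. The models_path value and directory names
below are illustrative assumptions, not taken from the patch, and
Path.is_relative_to() requires Python >= 3.9.

    from pathlib import Path

    models_path = Path("/home/user/invokeai/models")  # stand-in for config.models_path

    def relative_to_root(path: Path, root: Path) -> Path:
        # store a path relative to root when it lives under root (as in the patch)
        return path.relative_to(root) if path.is_relative_to(root) else path

    # install side: an absolute path under models_path is stored relative
    stored = relative_to_root(models_path / "sd-1/main/stable-diffusion-v1-5", models_path)
    assert str(stored) == "sd-1/main/stable-diffusion-v1-5"

    # load side: stored paths are resolved against models_path, not root_path
    model_path = models_path / stored
    assert model_path == Path("/home/user/invokeai/models/sd-1/main/stable-diffusion-v1-5")

Because nothing absolute is stored, the models directory can be relocated, or
models_path repointed, without rewriting models.yaml, which is the point of the
"relativize paths as they go in" comment in model_manager.py above.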