From 459f0238ddd02d58936e2b0119e3b394f05da57c Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 3 Oct 2023 22:43:19 -0400
Subject: [PATCH] multiple minor fixes

---
 .../app/services/config/invokeai_config.py    |  8 --------
 invokeai/app/services/download_manager.py     |  2 +-
 .../app/services/model_manager_service.py     |  4 ++++
 invokeai/backend/model_manager/cache.py       |  2 --
 invokeai/backend/model_manager/config.py      |  5 +++++
 invokeai/backend/model_manager/install.py     | 24 +++++++++++++++++++++++-
 invokeai/backend/model_manager/loader.py      | 10 +++++-----
 7 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/invokeai_config.py
index e7bd5b1177..d862dd844c 100644
--- a/invokeai/app/services/config/invokeai_config.py
+++ b/invokeai/app/services/config/invokeai_config.py
@@ -325,14 +325,6 @@ class InvokeAIAppConfig(InvokeAISettings):
         cls.singleton_init = kwargs
         return cls.singleton_config
 
-    @classmethod
-    def clear_config(cls):
-        """
-        This removes the singleton InvokeAIAppConfig configuration object.
-        """
-        cls.singleton_config = None
-        cls.singleton_init = None
-
     @property
     def root_path(self) -> Path:
         """
diff --git a/invokeai/app/services/download_manager.py b/invokeai/app/services/download_manager.py
index 4394a420b2..c333ced35f 100644
--- a/invokeai/app/services/download_manager.py
+++ b/invokeai/app/services/download_manager.py
@@ -139,7 +139,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
     ) -> DownloadJobBase:  # noqa D102
         event_handlers = event_handlers or []
         if self._event_bus:
-            event_handlers.append([self._event_bus.emit_model_download_event])
+            event_handlers.append(self._event_bus.emit_model_download_event)
         return self._queue.create_download_job(
             source,
             destdir,
diff --git a/invokeai/app/services/model_manager_service.py b/invokeai/app/services/model_manager_service.py
index b35a076f3d..375bb13b35 100644
--- a/invokeai/app/services/model_manager_service.py
+++ b/invokeai/app/services/model_manager_service.py
@@ -7,6 +7,7 @@
 from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
+from .invoker import Invoker
 from pydantic import Field, parse_obj_as
 from pydantic.networks import AnyHttpUrl
@@ -345,6 +346,9 @@ class ModelManagerService(ModelManagerServiceBase):
         if self._event_bus:
             kwargs.update(event_handlers=[self._event_bus.emit_model_event])
         self._loader = ModelLoad(config, **kwargs)
+
+    def start(self, invoker: Invoker):
+        """Called automatically at process start."""
         self._loader.installer.scan_models_directory()  # synchronize new/deleted models found in models directory
 
     def get_model(
diff --git a/invokeai/backend/model_manager/cache.py b/invokeai/backend/model_manager/cache.py
index 5d2ab1e182..c83bb38ee0 100644
--- a/invokeai/backend/model_manager/cache.py
+++ b/invokeai/backend/model_manager/cache.py
@@ -95,7 +95,6 @@ class ModelCache(object):
         execution_device: torch.device = torch.device("cuda"),
         storage_device: torch.device = torch.device("cpu"),
         precision: torch.dtype = torch.float16,
-        sequential_offload: bool = False,
         lazy_offloading: bool = True,
         sha_chunksize: int = 16777216,
         logger: Logger = InvokeAILogger.get_logger(),
@@ -106,7 +105,6 @@ class ModelCache(object):
         :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
         :param precision: Precision for loaded models [torch.float16]
         :param lazy_offloading: Keep model in VRAM until another model needs to be loaded
-        :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially
         :param sha_chunksize: Chunksize to use when calculating sha256 model hash
         """
         self.model_infos: Dict[str, ModelBase] = dict()
diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index 9317a898ba..9a2c4e5d35 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -240,7 +240,12 @@
 AnyModelConfig = Union[
     TextualInversionConfig,
     ONNXSD1Config,
     ONNXSD2Config,
+    VaeCheckpointConfig,
+    VaeDiffusersConfig,
+    ControlNetDiffusersConfig,
+    ControlNetCheckpointConfig,
     ModelConfigBase,
+    IPAdapterConfig,
 ]
 
diff --git a/invokeai/backend/model_manager/install.py b/invokeai/backend/model_manager/install.py
index 2ea7e4e54a..67b455f23e 100644
--- a/invokeai/backend/model_manager/install.py
+++ b/invokeai/backend/model_manager/install.py
@@ -293,6 +293,26 @@ class ModelInstallBase(ABC):
         """
         pass
 
+    @abstractmethod
+    def convert_model(
+        self,
+        key: str,
+        dest_directory: Optional[Path] = None,
+    ) -> ModelConfigBase:
+        """
+        Convert a checkpoint file into a diffusers folder.
+
+        It will delete the cached version as well as the
+        original checkpoint file if it is in the models directory.
+        :param key: Unique key of model.
+        :param dest_directory: Optional place to put converted file. If not specified,
+        it will be stored in the `models_dir`.
+
+        This will raise a ValueError unless the model is a checkpoint.
+        This will raise an UnknownModelException if key is unknown.
+        """
+        pass
+
     @abstractmethod
     def sync_model_path(self, key) -> ModelConfigBase:
         """
@@ -456,6 +476,8 @@ class ModelInstall(ModelInstallBase):
             if not path.exists():
                 new_path = path
             counter += 1
-        return old_path.replace(new_path)
+        # Path.replace() raises OSError when the old and new paths are on
+        # different filesystems; shutil.move() falls back to copy-and-delete.
+        return Path(shutil.move(str(old_path), str(new_path)))
 
     def _probe_model(self, model_path: Union[Path, str], overrides: Optional[Dict[str, Any]] = None) -> ModelProbeInfo:
diff --git a/invokeai/backend/model_manager/loader.py b/invokeai/backend/model_manager/loader.py
index 6e4b585acb..09080b3c8d 100644
--- a/invokeai/backend/model_manager/loader.py
+++ b/invokeai/backend/model_manager/loader.py
@@ -78,6 +78,7 @@ class ModelLoadBase(ABC):
         pass
 
     @property
+    @abstractmethod
     def config(self) -> InvokeAIAppConfig:
         """Return the config object used by this installer."""
         pass
@@ -100,8 +101,8 @@ class ModelLoadBase(ABC):
 
     @property
     @abstractmethod
-    def precision(self) -> str:
-        """Return 'float32' or 'float16'."""
+    def precision(self) -> torch.dtype:
+        """Return torch.float16 or torch.float32."""
         pass
 
     @abstractmethod
@@ -168,7 +169,6 @@ class ModelLoad(ModelLoadBase):
             lazy_offloading=config.lazy_offload,
             execution_device=device,
             precision=dtype,
-            sequential_offload=config.sequential_guidance,
             logger=self._logger,
         )
 
@@ -178,8 +178,8 @@ class ModelLoad(ModelLoadBase):
     @property
     def store(self) -> ModelConfigStore:
         """Return the model record store database."""
         return self._store
 
     @property
-    def precision(self) -> str:
-        """Return 'float32' or 'float16'."""
+    def precision(self) -> torch.dtype:
+        """Return torch.float16 or torch.float32."""
         return self._cache.precision
 
     @property
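
Note on the download_manager.py change: the queue stores callables in
event_handlers and invokes each one when a job fires an event, so appending a
one-element list succeeds at registration but fails much later, when the queue
calls the "handler". A minimal standalone sketch of that failure mode, plain
Python rather than InvokeAI's actual queue code:

    from typing import Callable, List

    Handler = Callable[[str], None]


    def fire_events(handlers: List[Handler], payload: str) -> None:
        # The queue eventually does the equivalent of this with every handler.
        for handler in handlers:
            handler(payload)


    handlers: List[Handler] = []
    handlers.append(print)  # correct: register the callable itself
    fire_events(handlers, "model downloaded")  # prints "model downloaded"

    handlers.append([print])  # the old bug: a one-element list is not callable
    try:
        fire_events(handlers, "model downloaded")
    except TypeError as err:
        print(f"deferred failure: {err}")  # 'list' object is not callable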
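Note on the _move change in install.py: Path.replace() only works when source
and destination share a filesystem, and the original review note also asked
for the file hash to be checked before and after the move. A sketch of a
helper that does both; safe_move and _sha256 are illustrative names rather
than InvokeAI API, and the 16 MB chunk size mirrors the sha_chunksize default
in cache.py:

    import hashlib
    import shutil
    from pathlib import Path


    def _sha256(path: Path, chunksize: int = 16777216) -> str:
        """Chunked sha256 so multi-gigabyte checkpoints never load fully into RAM."""
        digest = hashlib.sha256()
        with path.open("rb") as stream:
            for chunk in iter(lambda: stream.read(chunksize), b""):
                digest.update(chunk)
        return digest.hexdigest()


    def safe_move(old_path: Path, new_path: Path) -> Path:
        """Move a model file and verify its integrity on the other side."""
        before = _sha256(old_path)
        # shutil.move() copies and deletes when the paths are on different
        # filesystems, the case where Path.replace() raises OSError.
        moved = Path(shutil.move(str(old_path), str(new_path)))
        if _sha256(moved) != before:
            raise OSError(f"hash mismatch after moving {old_path} to {moved}")
        return moved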
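Note on the loader.py docstring fixes: torch exposes no fp16/fp32 attributes,
and a dtype instance such as torch.float32 is not usable as a return
annotation, so the property is annotated -> torch.dtype and documented as
returning torch.float16 or torch.float32. A quick self-check, with
precision_from_name as a hypothetical helper:

    import torch

    # dtype objects are instances of torch.dtype, so the property's annotation
    # must name the class, not one of its instances.
    assert isinstance(torch.float16, torch.dtype)
    assert isinstance(torch.float32, torch.dtype)


    def precision_from_name(name: str) -> torch.dtype:
        """Map the old string values onto real dtypes (hypothetical helper)."""
        return {"float16": torch.float16, "float32": torch.float32}[name]


    print(precision_from_name("float16"))  # torch.float16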