mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
multiple minor fixes
@@ -325,14 +325,6 @@ class InvokeAIAppConfig(InvokeAISettings):
             cls.singleton_init = kwargs
         return cls.singleton_config
 
-    @classmethod
-    def clear_config(cls):
-        """
-        This removes the singleton InvokeAIAppConfig configuration object.
-        """
-        cls.singleton_config = None
-        cls.singleton_config = None
-
     @property
     def root_path(self) -> Path:
         """
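For context, the clear_config() removed here is one half of a classmethod-singleton pattern. A minimal sketch of that pattern (not the real InvokeAIAppConfig, which is a pydantic settings class):

class AppConfig:
    """Minimal sketch of the singleton pattern above, for illustration only."""

    singleton_config = None
    singleton_init = None

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def get_config(cls, **kwargs):
        # Create the singleton on first use and remember the init kwargs.
        if cls.singleton_config is None:
            cls.singleton_config = cls(**kwargs)
            cls.singleton_init = kwargs
        return cls.singleton_config

    @classmethod
    def clear_config(cls):
        # Drop the cached instance so the next get_config() builds a fresh one.
        cls.singleton_config = None
        cls.singleton_init = None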
@@ -139,7 +139,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
     ) -> DownloadJobBase:  # noqa D102
         event_handlers = event_handlers or []
         if self._event_bus:
-            event_handlers.append([self._event_bus.emit_model_download_event])
+            event_handlers.append([self._event_bus.emit_model_download_event])  # BUG! This is not a valid method call
         return self._queue.create_download_job(
             source,
             destdir,
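The BUG note above flags that a one-element list is appended where a callable belongs, so anything that later invokes the handlers would try to call a list. A sketch of the presumably intended logic, assuming event_handlers is a flat list of callables (the helper name is hypothetical):

from typing import Callable, List, Optional

def collect_handlers(event_bus, event_handlers: Optional[List[Callable]] = None) -> List[Callable]:
    # Handlers stay a flat list of callables.
    handlers = list(event_handlers or [])
    if event_bus:
        handlers.append(event_bus.emit_model_download_event)  # the bound method itself, no list wrapper
    return handlers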
@@ -7,6 +7,7 @@ from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
+from processor import Invoker
 from pydantic import Field, parse_obj_as
 from pydantic.networks import AnyHttpUrl
@@ -345,6 +346,9 @@ class ModelManagerService(ModelManagerServiceBase):
         if self._event_bus:
             kwargs.update(event_handlers=[self._event_bus.emit_model_event])
         self._loader = ModelLoad(config, **kwargs)
 
+    def start(self, invoker: Invoker):
+        """Called automatically at process start."""
+        self._loader.installer.scan_models_directory()  # synchronize new/deleted models found in models directory
+
     def get_model(
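The new start() hook implies that the invoker calls start() on each service when the process comes up. A hypothetical sketch of that wiring (this Invoker is illustrative, not the real processor.Invoker):

class Invoker:
    """Illustrative stand-in: calls start() on every registered service."""

    def __init__(self, services):
        self.services = services

    def start(self):
        for service in self.services:
            # Each service gets a chance to synchronize state at startup,
            # e.g. ModelManagerService rescanning the models directory.
            if hasattr(service, "start"):
                service.start(self)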
@@ -95,7 +95,6 @@ class ModelCache(object):
         execution_device: torch.device = torch.device("cuda"),
         storage_device: torch.device = torch.device("cpu"),
         precision: torch.dtype = torch.float16,
         sequential_offload: bool = False,
         lazy_offloading: bool = True,
         sha_chunksize: int = 16777216,
         logger: Logger = InvokeAILogger.get_logger(),
@@ -106,7 +105,6 @@ class ModelCache(object):
         :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
         :param precision: Precision for loaded models [torch.float16]
         :param lazy_offloading: Keep model in VRAM until another model needs to be loaded
         :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially
         :param sha_chunksize: Chunksize to use when calculating sha256 model hash
         """
         self.model_infos: Dict[str, ModelBase] = dict()
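Taken together, the two ModelCache hunks document this constructor. A hedged usage sketch, mirroring the defaults shown above (ModelCache itself is assumed importable):

import torch

cache = ModelCache(
    execution_device=torch.device("cuda"),   # where active models run
    storage_device=torch.device("cpu"),      # where inactive models are parked
    precision=torch.float16,
    lazy_offloading=True,                    # keep models in VRAM until space is needed
    sequential_offload=False,
    sha_chunksize=16_777_216,                # 16 MiB per read when hashing
)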
@@ -240,7 +240,12 @@ AnyModelConfig = Union[
     TextualInversionConfig,
     ONNXSD1Config,
     ONNXSD2Config,
+    VaeCheckpointConfig,
+    VaeDiffusersConfig,
+    ControlNetDiffusersConfig,
+    ControlNetCheckpointConfig,
     ModelConfigBase,
+    IPAdapterConfig,
 ]
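With parse_obj_as imported above, a raw record can be validated against this union; pydantic v1 tries each member left to right and returns the first that accepts the data. A sketch with hypothetical field values (the real field names come from ModelConfigBase and its subclasses):

from pydantic import parse_obj_as

record = {
    "name": "sd-vae-ft-mse",
    "model_type": "vae",
    "model_format": "diffusers",
    "path": "models/sd-vae-ft-mse",
}
config = parse_obj_as(AnyModelConfig, record)  # tries each union member in order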
@@ -293,6 +293,26 @@ class ModelInstallBase(ABC):
         """
         pass
 
+    @abstractmethod
+    def convert_model(
+        self,
+        key: str,
+        dest_directory: Optional[Path] = None,
+    ) -> ModelConfigBase:
+        """
+        Convert a checkpoint file into a diffusers folder.
+
+        It will delete the cached version as well as the
+        original checkpoint file if it is in the models directory.
+        :param key: Unique key of model.
+        :param dest_directory: Optional place to put converted file. If not specified,
+        will be stored in the `models_dir`.
+
+        This will raise a ValueError unless the model is a checkpoint.
+        This will raise an UnknownModelException if key is unknown.
+        """
+        pass
+
     @abstractmethod
     def sync_model_path(self, key) -> ModelConfigBase:
         """
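A usage sketch of the convert_model contract above (the installer instance and key are hypothetical; UnknownModelException comes from the same module as ModelInstallBase):

from pathlib import Path

try:
    # Convert a checkpoint-format model to a diffusers folder under ./converted.
    config = installer.convert_model("my-model-key", dest_directory=Path("converted"))
except ValueError:
    print("model is not in checkpoint format")
except UnknownModelException:
    print("no model installed under that key")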
@@ -456,6 +476,9 @@ class ModelInstall(ModelInstallBase):
         if not path.exists():
             new_path = path
         counter += 1
+        self._logger.warning('Use shutil.move(), not Path.replace() here; hash before and after move')
+        # BUG! This won't work across filesystems.
+        # Rehash before and after moving.
         return old_path.replace(new_path)
 
     def _probe_model(self, model_path: Union[Path, str], overrides: Optional[Dict[str, Any]] = None) -> ModelProbeInfo:
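A sketch of the fix the warning calls for: shutil.move() copies across filesystem boundaries where Path.replace() raises OSError, and hashing before and after catches a corrupted copy (helper names here are assumptions, not the repo's):

import hashlib
import shutil
from pathlib import Path

def _sha256(path: Path, chunksize: int = 16 * 1024 * 1024) -> str:
    # Hash in chunks so large checkpoint files never load fully into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunksize):
            h.update(chunk)
    return h.hexdigest()

def safe_move(old_path: Path, new_path: Path) -> Path:
    before = _sha256(old_path)
    moved = Path(shutil.move(str(old_path), str(new_path)))  # works across filesystems
    if _sha256(moved) != before:
        raise RuntimeError(f"hash mismatch after moving {old_path} -> {moved}")
    return moved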
@@ -78,6 +78,7 @@ class ModelLoadBase(ABC):
         pass
 
     @property
     @abstractmethod
     def config(self) -> InvokeAIAppConfig:
         """Return the config object used by this installer."""
         pass
@@ -100,8 +101,8 @@ class ModelLoadBase(ABC):
 
     @property
     @abstractmethod
-    def precision(self) -> str:
-        """Return 'float32' or 'float16'."""
+    def precision(self) -> torch.dtype:
+        """Return torch.float16 or torch.float32."""
         pass
 
     @abstractmethod
@@ -168,7 +169,6 @@ class ModelLoad(ModelLoadBase):
             lazy_offloading=config.lazy_offload,
             execution_device=device,
             precision=dtype,
             sequential_offload=config.sequential_guidance,
             logger=self._logger,
         )
@@ -178,8 +178,8 @@ class ModelLoad(ModelLoadBase):
         return self._store
 
     @property
-    def precision(self) -> str:
-        """Return 'float32' or 'float16'."""
+    def precision(self) -> torch.dtype:
+        """Return torch.float16 or torch.float32."""
         return self._cache.precision
 
     @property
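Both precision hunks change the property from the strings 'float16'/'float32' to a torch.dtype. A sketch of the mapping callers might use during the transition (names assumed):

import torch

# Assumed mapping from the old string values to the new torch.dtype return type.
PRECISION_TO_DTYPE = {"float16": torch.float16, "float32": torch.float32}

def to_torch_dtype(precision: str) -> torch.dtype:
    return PRECISION_TO_DTYPE[precision]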