diff --git a/invokeai/app/api/routers/model_manager.py b/invokeai/app/api/routers/model_manager.py
index 77335f5bac..2136c416b2 100644
--- a/invokeai/app/api/routers/model_manager.py
+++ b/invokeai/app/api/routers/model_manager.py
@@ -3,6 +3,7 @@
 import io
 import pathlib
+import shutil
 import traceback
 from copy import deepcopy
 from enum import Enum
@@ -863,19 +864,20 @@ async def set_cache_size(
         # Try to apply the target state.
         cache.max_vram_cache_size = vram_new
         cache.max_cache_size = ram_new
-        app_config.max_cache_size = ram_new
-        app_config.max_vram_cache_size = vram_new
+        app_config.ram = ram_new
+        app_config.vram = vram_new
         if persist:
             app_config.write_file(new_config_path)
             shutil.move(new_config_path, config_path)
     except Exception as e:
         # If there was a failure, restore the initial state.
-        cache.max_vram_cache_size = vram_old
         cache.max_cache_size = ram_old
-        app_config.max_cache_size = ram_old
-        app_config.max_vram_cache_size = vram_old
+        cache.max_vram_cache_size = vram_old
+        app_config.ram = ram_old
+        app_config.vram = vram_old
         raise RuntimeError("Failed to update cache size") from e
+    return value


 @model_manager_router.get(
diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py
index 13ba02a2a8..bf8e6a3ce2 100644
--- a/invokeai/backend/model_manager/load/load_default.py
+++ b/invokeai/backend/model_manager/load/load_default.py
@@ -66,6 +66,7 @@ class ModelLoader(ModelLoaderBase):
         return (model_base / config.path).resolve()

     def _load_and_cache(self, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> ModelLockerBase:
+        stats_name = ":".join([config.base, config.type, config.name, (submodel_type or "")])
         try:
             return self._ram_cache.get(config.key, submodel_type, stats_name=stats_name)
         except IndexError: