From 99c692f39704b642914731062a14e7807d0025e2 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 9 May 2023 23:46:59 -0400
Subject: [PATCH] check that model name matches format

---
 invokeai/backend/model_management/model_cache.py   | 7 +++++++
 invokeai/backend/model_management/model_manager.py | 7 +++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 64f95699c9..08aa2f3044 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -289,19 +289,26 @@ class ModelCache(object):
         cache = self.cache
         key = self.key
         model = self.model
+
+        # NOTE that the model has to have the to() method in order for this
+        # code to move it into GPU!
         if self.gpu_load and hasattr(model,'to'):
             cache.loaded_models.add(key)
             cache.locked_models[key] += 1
+
             if cache.lazy_offloading:
                 cache._offload_unlocked_models()
+
             if model.device != cache.execution_device:
                 cache.logger.debug(f'Moving {key} into {cache.execution_device}')
                 with VRAMUsage() as mem:
                     model.to(cache.execution_device)  # move into GPU
                 cache.logger.debug(f'GPU VRAM used for load: {(mem.vram_used/GIG):.2f} GB')
                 cache.model_sizes[key] = mem.vram_used  # more accurate size
+
             cache.logger.debug(f'Locking {key} in {cache.execution_device}')
             cache._print_cuda_stats()
+
         else:
             # in the event that the caller wants the model in RAM, we
             # move it into CPU if it is in GPU and not locked
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 0905d5bf40..f4093ab116 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -1054,10 +1054,13 @@ class ModelManager(object):
 
         full_name = f"{model_name}/{model_type.name}"
         if full_name in self.config:
             return full_name
-        if model_name in self.config:
+        # special case - if diffusers requested, then allow name without type appended
+        if model_type==SDModelType.diffusers \
+           and model_name in self.config \
+           and self.config[model_name].format=='diffusers':
             return model_name
         raise InvalidModelError(
-            f'Neither "{model_name}" nor "{full_name}" are known model names. Please check your models.yaml file'
+            f'"{full_name}" is not a known model name. Please check your models.yaml file'
         )
 
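
For context on the model_cache.py hunk: the VRAMUsage() context manager wrapped around model.to() measures how much GPU memory the load actually consumes, which the cache then records as a more accurate size for the model. Below is a minimal sketch of how such a measurement can work, assuming PyTorch with CUDA available; VRAMUsageSketch is a hypothetical stand-in, not the project's actual VRAMUsage implementation.

import torch

class VRAMUsageSketch:
    # Illustrative VRAM-measuring context manager (not InvokeAI's VRAMUsage).
    def __enter__(self):
        torch.cuda.synchronize()  # let pending kernels finish before sampling
        self.vram_used = 0
        self._start = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        torch.cuda.synchronize()
        self.vram_used = torch.cuda.memory_allocated() - self._start

GIG = 2 ** 30  # bytes per GB, standing in for the GIG constant the patch references

# Usage, mirroring the pattern in the patch:
#   with VRAMUsageSketch() as mem:
#       model.to('cuda')
#   print(f'GPU VRAM used for load: {(mem.vram_used/GIG):.2f} GB')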
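
The model_manager.py hunk is the change the subject line describes: a fully qualified "model_name/model_type" key always resolves, but a bare model name is now accepted only when a diffusers model was requested and the config entry's format field is 'diffusers'. Previously any bare name present in the config would resolve regardless of its format. A self-contained sketch of that rule follows; resolve_model_name, the string model_type_name parameter, and the sample entries are all hypothetical simplifications of the real ModelManager and models.yaml machinery.

from types import SimpleNamespace

def resolve_model_name(config, model_name, model_type_name):
    # a fully qualified "name/type" key always wins
    full_name = f"{model_name}/{model_type_name}"
    if full_name in config:
        return full_name
    # special case: a bare name resolves only for diffusers-format models
    if (model_type_name == "diffusers"
            and model_name in config
            and config[model_name].format == "diffusers"):
        return model_name
    raise KeyError(f'"{full_name}" is not a known model name')

# hypothetical entries standing in for models.yaml
config = {
    "inpaint-1.5/ckpt": SimpleNamespace(format="ckpt"),
    "sd-1.5": SimpleNamespace(format="diffusers"),
}
assert resolve_model_name(config, "inpaint-1.5", "ckpt") == "inpaint-1.5/ckpt"
assert resolve_model_name(config, "sd-1.5", "diffusers") == "sd-1.5"
# before this patch a bare name of the wrong format would have resolved;
# now the lookup fails instead:
try:
    resolve_model_name(config, "sd-1.5", "ckpt")
except KeyError as err:
    print(err.args[0])  # "sd-1.5/ckpt" is not a known model name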