Mirror of https://github.com/invoke-ai/InvokeAI
check that model name matches format
Commit 99c692f397 (parent 3d85e769ce)
@@ -289,19 +289,26 @@ class ModelCache(object):
            cache = self.cache
            key = self.key
            model = self.model

            # NOTE that the model has to have the to() method in order for this
            # code to move it into GPU!
            if self.gpu_load and hasattr(model,'to'):
                cache.loaded_models.add(key)
                cache.locked_models[key] += 1

                if cache.lazy_offloading:
                    cache._offload_unlocked_models()

                if model.device != cache.execution_device:
                    cache.logger.debug(f'Moving {key} into {cache.execution_device}')
                    with VRAMUsage() as mem:
                        model.to(cache.execution_device)  # move into GPU
                    cache.logger.debug(f'GPU VRAM used for load: {(mem.vram_used/GIG):.2f} GB')
                    cache.model_sizes[key] = mem.vram_used  # more accurate size

                cache.logger.debug(f'Locking {key} in {cache.execution_device}')
                cache._print_cuda_stats()

            else:
                # in the event that the caller wants the model in RAM, we
                # move it into CPU if it is in GPU and not locked
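The hunk above relies on two helpers defined elsewhere in model_cache.py: a GIG constant used to convert bytes to gigabytes, and a VRAMUsage context manager that reports how much GPU memory the wrapped block allocated. As a rough sketch only, assuming VRAMUsage simply takes the difference in torch.cuda.memory_allocated() across the block (the repository's actual helper may differ):

import torch

GIG = 2 ** 30  # bytes per gigabyte (assumed value of the module-level constant)

class VRAMUsage(object):
    # Sketch of a context manager that measures VRAM allocated inside a with-block;
    # not the repository's exact implementation.
    def __init__(self):
        self.vram = 0
        self.vram_used = 0

    def __enter__(self):
        # snapshot of currently allocated CUDA memory on entry
        self.vram = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *args):
        # the delta on exit approximates what the block loaded onto the GPU
        self.vram_used = torch.cuda.memory_allocated() - self.vram

This is what lets the locking code record mem.vram_used in cache.model_sizes[key] as a measured, more accurate model size once the model has been moved onto the execution device.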
@@ -1054,10 +1054,13 @@ class ModelManager(object):
         full_name = f"{model_name}/{model_type.name}"
         if full_name in self.config:
             return full_name
-        if model_name in self.config:
+        # special case - if diffusers requested, then allow name without type appended
+        if model_type==SDModelType.diffusers \
+           and model_name in self.config \
+           and self.config[model_name].format=='diffusers':
             return model_name
         raise InvalidModelError(
-            f'Neither "{model_name}" nor "{full_name}" are known model names. Please check your models.yaml file'
+            f'"{full_name}" is not a known model name. Please check your models.yaml file'
         )
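The ModelManager hunk is what the commit title refers to: a bare model name (without the "/type" suffix) is now resolved only when a diffusers model was requested and the matching models.yaml entry is itself in diffusers format. A small standalone sketch of the same lookup rule, using a hypothetical resolve_model_name function and a plain dict in place of the real models.yaml config, shows the intended behavior:

# Hypothetical, minimal reproduction of the lookup rule above; the real logic
# lives on ModelManager and reads models.yaml.
def resolve_model_name(config: dict, model_name: str, model_type: str) -> str:
    full_name = f"{model_name}/{model_type}"
    if full_name in config:
        return full_name
    # special case - a bare name is accepted only for diffusers-format entries
    if model_type == 'diffusers' \
       and model_name in config \
       and config[model_name].get('format') == 'diffusers':
        return model_name
    raise KeyError(
        f'"{full_name}" is not a known model name. Please check your models.yaml file'
    )

config = {
    'sd-1.5': {'format': 'diffusers'},                      # legacy entry, no type suffix
    'analog-diffusion/diffusers': {'format': 'diffusers'},  # fully qualified entry
}
print(resolve_model_name(config, 'analog-diffusion', 'diffusers'))  # -> analog-diffusion/diffusers
print(resolve_model_name(config, 'sd-1.5', 'diffusers'))            # -> sd-1.5 (bare name allowed)
# resolve_model_name(config, 'sd-1.5', 'ckpt') would raise, since "sd-1.5/ckpt" is unknown

The removed branch accepted a bare name for any entry found in the config; the added format check keeps, for example, a checkpoint entry from being silently returned when a diffusers model was asked for.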