mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
Fix LoRA loading crash
This commit is contained in:
parent
73f3b7f84b
commit
844578ab88
@@ -187,7 +187,7 @@ class ModelCache(object):
         # TODO: lock for no copies on simultaneous calls?
         cache_entry = self._cached_models.get(key, None)
         if cache_entry is None:
-            self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}")
+            self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}")

             # this will remove older cached models until
             # there is sufficient room to load the requested model
Loading…
Reference in New Issue
Block a user