fix lora loading crash

Lincoln Stein 2023-07-30 07:57:10 -04:00
parent 73f3b7f84b
commit 844578ab88


@@ -187,7 +187,7 @@ class ModelCache(object):
# TODO: lock for no copies on simultaneous calls?
cache_entry = self._cached_models.get(key, None)
if cache_entry is None:
self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value}")
self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}")
# this will remove older cached models until
# there is sufficient room to load the requested model
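The crash occurred because `submodel` can be `None` when loading a LoRA, and the old log line unconditionally accessed `submodel.value`, raising an `AttributeError`. Below is a minimal standalone sketch (not the actual InvokeAI code; the `SubModelType` enum and `describe` helper are illustrative) showing the failure mode and the conditional-expression guard used in the fix:

```python
from enum import Enum
from typing import Optional


class SubModelType(Enum):
    # Hypothetical enum standing in for the real submodel type
    UNET = "unet"
    VAE = "vae"


def describe(submodel: Optional[SubModelType]) -> str:
    # Old behaviour: f"...{submodel.value}" raises AttributeError when submodel is None.
    # New behaviour: fall back to an empty string when no submodel is given.
    return f"type suffix: {submodel.value if submodel else ''}"


print(describe(SubModelType.VAE))  # "type suffix: vae"
print(describe(None))              # "type suffix: " (no crash)
```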