diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 931b6c6b34..4c18068bae 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -187,7 +187,9 @@ class ModelCache(object):
         # TODO: lock for no copies on simultaneous calls?
         cache_entry = self._cached_models.get(key, None)
         if cache_entry is None:
-            self.logger.info(f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}")
+            self.logger.info(
+                f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}"
+            )
 
             # this will remove older cached models until
             # there is sufficient room to load the requested model