diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 08aa2f3044..558de0f28c 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -68,7 +68,7 @@ class SDModelType(Enum):
     # distinguish them by class
     lora=LoraType
     textual_inversion=TIType
-    
+
 class ModelStatus(Enum):
     unknown='unknown'
     not_loaded='not loaded'
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index f1a6dc18dc..4fbc80703a 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -176,6 +176,12 @@ class SDModelInfo():
     revision: str = None
     _cache: ModelCache = None
 
+    def __enter__(self):
+        return self.context.__enter__()
+
+    def __exit__(self, *args, **kwargs):
+        return self.context.__exit__(*args, **kwargs)
+
     @property
     def status(self)->ModelStatus:
         '''Return load status of this model as a model_cache.ModelStatus enum'''