From c87497fd54231d1d549f2535575857e73c444fa3 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 19 Mar 2024 16:14:12 -0400 Subject: [PATCH] record model_variant in t2i and clip_vision configs (#5989) - Move base of t2i and clip_vision config models to DiffusersConfigBase, which contains a field to record the model variant (e.g. "fp16") - This restores the ability to load fp16 t2i and clip_vision models - Also add defensive coding to load the vanilla model when the fp16 model has been replaced (or more likely, user's preferences changed since installation) Co-authored-by: Lincoln Stein --- invokeai/backend/model_manager/config.py | 4 ++-- .../load/model_loaders/generic_diffusers.py | 12 ++++++++++-- .../load/model_loaders/stable_diffusion.py | 19 ++++++++++++++----- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py index 2d5bb36a57..3b7f85cb16 100644 --- a/invokeai/backend/model_manager/config.py +++ b/invokeai/backend/model_manager/config.py @@ -331,7 +331,7 @@ class IPAdapterConfig(ModelConfigBase): return Tag(f"{ModelType.IPAdapter.value}.{ModelFormat.InvokeAI.value}") -class CLIPVisionDiffusersConfig(ModelConfigBase): +class CLIPVisionDiffusersConfig(DiffusersConfigBase): """Model config for CLIPVision.""" type: Literal[ModelType.CLIPVision] = ModelType.CLIPVision @@ -342,7 +342,7 @@ class CLIPVisionDiffusersConfig(ModelConfigBase): return Tag(f"{ModelType.CLIPVision.value}.{ModelFormat.Diffusers.value}") -class T2IAdapterConfig(ModelConfigBase, ControlAdapterConfigBase): +class T2IAdapterConfig(DiffusersConfigBase, ControlAdapterConfigBase): """Model config for T2I.""" type: Literal[ModelType.T2IAdapter] = ModelType.T2IAdapter diff --git a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py index b29862e152..a630b24d0a 100644 --- 
a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py +++ b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py @@ -36,7 +36,15 @@ class GenericDiffusersLoader(ModelLoader): if submodel_type is not None: raise Exception(f"There are no submodels in models of type {model_class}") variant = model_variant.value if model_variant else None - result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) # type: ignore + try: + result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) + except OSError as e: + if variant and "no file named" in str( + e + ): # try without the variant, just in case user's preferences changed + result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype) + else: + raise e return result # TO DO: Add exception handling @@ -63,7 +71,7 @@ class GenericDiffusersLoader(ModelLoader): assert class_name is not None result = self._hf_definition_to_type(module="transformers", class_name=class_name[0]) if not class_name: - raise InvalidModelConfigException("Unable to decifer Load Class based on given config.json") + raise InvalidModelConfigException("Unable to decipher Load Class based on given config.json") except KeyError as e: raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e assert result is not None diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py index 8d0dc90a2d..3fb2e29f60 100644 --- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -44,11 +44,20 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader): load_class = self.get_hf_load_class(model_path, submodel_type) variant = model_variant.value if model_variant else None model_path = 
model_path / submodel_type.value - result: AnyModel = load_class.from_pretrained( - model_path, - torch_dtype=self._torch_dtype, - variant=variant, - ) # type: ignore + try: + result: AnyModel = load_class.from_pretrained( + model_path, + torch_dtype=self._torch_dtype, + variant=variant, + ) + except OSError as e: + if variant and "no file named" in str( + e + ): # try without the variant, just in case user's preferences changed + result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype) + else: + raise e + return result def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: