Mirror of https://github.com/invoke-ai/InvokeAI
record model_variant in t2i and clip_vision configs (#5989)
- Move the base of the t2i and clip_vision config models to DiffusersConfigBase, which contains a field that records the model variant (e.g. "fp16").
- This restores the ability to load fp16 t2i and clip_vision models.
- Also add defensive coding to load the vanilla model when the fp16 model has been replaced (or, more likely, the user's preferences have changed since installation).

Co-authored-by: Lincoln Stein <lstein@gmail.com>
parent 3f61c51c3a
commit c87497fd54
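For context on the fp16 variant loading this commit restores: a minimal standalone sketch (not InvokeAI code) of how an fp16 variant is requested from diffusers. The model path is a placeholder; variant and torch_dtype are standard from_pretrained arguments, and the variant selects weight files named like *.fp16.safetensors.

import torch
from diffusers import AutoencoderKL

# Placeholder path to a diffusers-format model folder that ships fp16 weights
# (e.g. diffusion_pytorch_model.fp16.safetensors) next to the full-precision files.
model_path = "/path/to/some-diffusers-model/vae"

vae = AutoencoderKL.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # store the loaded tensors in half precision
    variant="fp16",             # ask diffusers for the *.fp16.* weight files
)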
@@ -331,7 +331,7 @@ class IPAdapterConfig(ModelConfigBase):
         return Tag(f"{ModelType.IPAdapter.value}.{ModelFormat.InvokeAI.value}")
 
 
-class CLIPVisionDiffusersConfig(ModelConfigBase):
+class CLIPVisionDiffusersConfig(DiffusersConfigBase):
     """Model config for CLIPVision."""
 
     type: Literal[ModelType.CLIPVision] = ModelType.CLIPVision
@@ -342,7 +342,7 @@ class CLIPVisionDiffusersConfig(ModelConfigBase):
         return Tag(f"{ModelType.CLIPVision.value}.{ModelFormat.Diffusers.value}")
 
 
-class T2IAdapterConfig(ModelConfigBase, ControlAdapterConfigBase):
+class T2IAdapterConfig(DiffusersConfigBase, ControlAdapterConfigBase):
     """Model config for T2I."""
 
     type: Literal[ModelType.T2IAdapter] = ModelType.T2IAdapter
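The DiffusersConfigBase that CLIPVisionDiffusersConfig and T2IAdapterConfig now inherit from is not part of this diff. A rough sketch of such a base, assuming a pydantic config model with a variant-recording field; the field name repo_variant and the ModelRepoVariant enum values are assumptions, not taken from the diff.

from enum import Enum
from typing import Optional

from pydantic import BaseModel


class ModelRepoVariant(str, Enum):
    # Assumed enum of HuggingFace repo variants; only the two relevant values shown.
    DEFAULT = ""
    FP16 = "fp16"


class DiffusersConfigBase(BaseModel):
    # Assumed base config for diffusers-format models: remembers which variant
    # (e.g. "fp16") was installed so the loader can pass it back to from_pretrained().
    repo_variant: Optional[ModelRepoVariant] = ModelRepoVariant.DEFAULT

The loader hunks below then derive the variant string with "variant = model_variant.value if model_variant else None", which is consistent with an enum whose .value is the variant name.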
@@ -36,7 +36,15 @@ class GenericDiffusersLoader(ModelLoader):
         if submodel_type is not None:
             raise Exception(f"There are no submodels in models of type {model_class}")
         variant = model_variant.value if model_variant else None
-        result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant)  # type: ignore
+        try:
+            result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant)
+        except OSError as e:
+            if variant and "no file named" in str(
+                e
+            ):  # try without the variant, just in case user's preferences changed
+                result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
+            else:
+                raise e
         return result
 
     # TO DO: Add exception handling
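The defensive pattern added above, as a self-contained sketch: try the recorded variant first, and if diffusers reports the variant's weight files as missing (its OSError message contains "no file named"), retry without a variant. The class and helper name here are placeholders, not InvokeAI code.

from pathlib import Path
from typing import Optional

import torch
from diffusers import AutoencoderKL


def load_with_variant_fallback(model_path: Path, variant: Optional[str]) -> AutoencoderKL:
    # Prefer the recorded variant (e.g. "fp16"), but fall back to the vanilla
    # weights if the variant files have been removed or were never installed.
    try:
        return AutoencoderKL.from_pretrained(model_path, torch_dtype=torch.float16, variant=variant)
    except OSError as e:
        if variant and "no file named" in str(e):
            return AutoencoderKL.from_pretrained(model_path, torch_dtype=torch.float16)
        raise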
@@ -63,7 +71,7 @@ class GenericDiffusersLoader(ModelLoader):
                     assert class_name is not None
                     result = self._hf_definition_to_type(module="transformers", class_name=class_name[0])
                 if not class_name:
-                    raise InvalidModelConfigException("Unable to decifer Load Class based on given config.json")
+                    raise InvalidModelConfigException("Unable to decipher Load Class based on given config.json")
             except KeyError as e:
                 raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e
         assert result is not None
@@ -44,11 +44,20 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
         load_class = self.get_hf_load_class(model_path, submodel_type)
         variant = model_variant.value if model_variant else None
         model_path = model_path / submodel_type.value
-        result: AnyModel = load_class.from_pretrained(
-            model_path,
-            torch_dtype=self._torch_dtype,
-            variant=variant,
-        )  # type: ignore
+        try:
+            result: AnyModel = load_class.from_pretrained(
+                model_path,
+                torch_dtype=self._torch_dtype,
+                variant=variant,
+            )
+        except OSError as e:
+            if variant and "no file named" in str(
+                e
+            ):  # try without the variant, just in case user's preferences changed
+                result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
+            else:
+                raise e
+
         return result
 
     def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: