Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Get CLIPVision model download from HF working.

commit 6d0ea42a94
parent 2c1100509f
@@ -418,7 +418,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
             image_encoder_model_info = context.services.model_manager.get_model(
                 # TODO(ryand): Get this model_name from the IPAdapterField.
-                model_name="ip_adapter_clip_vision",
+                model_name="ip_adapter_sd_image_encoder",
                 model_type=ModelType.CLIPVision,
                 base_model=BaseModelType.Any,
                 context=context,
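For context on what this lookup resolves to: the model registered under `ip_adapter_sd_image_encoder` is the CLIP Vision image encoder that IP-Adapter uses to embed its conditioning image. Below is a minimal sketch of what loading and running such an encoder looks like with the transformers API; the local path and image file are placeholders, and this is not InvokeAI's actual model-manager wiring.

```python
# Sketch only: what a CLIP Vision image encoder does for IP-Adapter.
# Paths are placeholders; InvokeAI resolves the real location via its model manager.
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

encoder_path = "path/to/ip_adapter_sd_image_encoder"  # placeholder local checkpoint dir
image_encoder = CLIPVisionModelWithProjection.from_pretrained(encoder_path)
processor = CLIPImageProcessor()

image = Image.open("reference.png")  # placeholder IP-Adapter conditioning image
inputs = processor(images=image, return_tensors="pt")
image_embeds = image_encoder(**inputs).image_embeds  # one embedding vector per image
print(image_embeds.shape)
```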
@@ -318,7 +318,6 @@ class ModelInstall(object):
             location = self._download_hf_pipeline(repo_id, staging)  # pipeline
         elif "unet/model.onnx" in files:
             location = self._download_hf_model(repo_id, files, staging)
-            # TODO(ryand): Add special handling for ip_adapter?
         else:
             for suffix in ["safetensors", "bin"]:
                 if f"pytorch_lora_weights.{suffix}" in files:
@@ -337,6 +336,11 @@ class ModelInstall(object):
                 elif f"learned_embeds.{suffix}" in files:
                     location = self._download_hf_model(repo_id, [f"learned_embeds.{suffix}"], staging)
                     break
+                elif f"model.{suffix}" in files and "config.json" in files:
+                    # This elif-condition is pretty fragile, but it is intended to handle CLIP Vision models hosted
+                    # by InvokeAI for use with IP-Adapters.
+                    files = ["config.json", f"model.{suffix}"]
+                    location = self._download_hf_model(repo_id, files, staging)
             if not location:
                 logger.warning(f"Could not determine type of repo {repo_id}. Skipping install.")
                 return {}
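The repos this new branch is meant to catch are plain transformers-style CLIP Vision checkpoints, whose file listing is essentially a `config.json` plus a single `model.safetensors` (or `model.bin`) weights file. A standalone sketch of the selection chain under that assumption; the example file listing is illustrative, not taken from the commit.

```python
# Standalone sketch of the file-selection chain above, outside of ModelInstall.
# The example file listing is illustrative; a real one would come from the HF Hub API.
from typing import Optional


def select_files(files: set) -> Optional[list]:
    for suffix in ["safetensors", "bin"]:
        if f"pytorch_lora_weights.{suffix}" in files:
            return [f"pytorch_lora_weights.{suffix}"]          # LoRA
        elif f"learned_embeds.{suffix}" in files:
            return [f"learned_embeds.{suffix}"]                # textual inversion
        elif f"model.{suffix}" in files and "config.json" in files:
            return ["config.json", f"model.{suffix}"]          # CLIP Vision checkpoint
    return None


clip_vision_repo = {"config.json", "model.safetensors", "preprocessor_config.json"}
print(select_files(clip_vision_repo))  # ['config.json', 'model.safetensors']
```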
@@ -54,8 +54,7 @@ class ModelProbe(object):
         "StableDiffusionXLInpaintPipeline": ModelType.Main,
         "AutoencoderKL": ModelType.Vae,
         "ControlNetModel": ModelType.ControlNet,
-        "IPAdapterModel": ModelType.IPAdapter,
-        "CLIPVision": ModelType.CLIPVision,
+        "CLIPVisionModelWithProjection": ModelType.CLIPVision,
     }

     @classmethod
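The new key has to match the class name that transformers records when it saves a CLIP Vision encoder. A quick way to see where `CLIPVisionModelWithProjection` comes from is to save a tiny randomly-initialized encoder and inspect its config; the config sizes below are arbitrary and nothing is downloaded.

```python
import json
import tempfile

from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection

# Tiny, randomly-initialized CLIP Vision encoder; the point is only to inspect
# the config.json that transformers writes out, not to produce useful weights.
config = CLIPVisionConfig(
    hidden_size=32, intermediate_size=64, num_hidden_layers=2,
    num_attention_heads=2, image_size=32, patch_size=16,
)
model = CLIPVisionModelWithProjection(config)

with tempfile.TemporaryDirectory() as tmp:
    model.save_pretrained(tmp)
    with open(f"{tmp}/config.json") as f:
        conf = json.load(f)

# save_pretrained records the concrete model class, which is what CLASS2TYPE keys on.
print(conf["architectures"])  # ['CLIPVisionModelWithProjection']
```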
@@ -196,7 +195,12 @@ class ModelProbe(object):
         if config_path:
             with open(config_path, "r") as file:
                 conf = json.load(file)
-            class_name = conf["_class_name"]
+            if "_class_name" in conf:
+                class_name = conf["_class_name"]
+            elif "architectures" in conf:
+                class_name = conf["architectures"][0]
+            else:
+                class_name = None

             if class_name and (type := cls.CLASS2TYPE.get(class_name)):
                 return type
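A self-contained sketch of the same fallback, fed with the two config flavors it has to distinguish: diffusers-style configs carry `_class_name`, while transformers-style configs (including CLIP Vision encoders) list the model class under `architectures`. The dicts and the string-valued type map below are abbreviated stand-ins, not the real InvokeAI objects.

```python
from typing import Optional

# Abbreviated examples of the two config.json flavors the probe may encounter.
diffusers_conf = {"_class_name": "ControlNetModel", "cross_attention_dim": 768}
transformers_conf = {"architectures": ["CLIPVisionModelWithProjection"], "hidden_size": 1280}

# Stand-in for ModelProbe.CLASS2TYPE; values are plain strings instead of ModelType members.
CLASS2TYPE = {
    "ControlNetModel": "controlnet",
    "CLIPVisionModelWithProjection": "clip_vision",
}


def probe_class_name(conf: dict) -> Optional[str]:
    # Same precedence as the patched ModelProbe: diffusers key first, then transformers key.
    if "_class_name" in conf:
        return conf["_class_name"]
    elif "architectures" in conf:
        return conf["architectures"][0]
    return None


for conf in (diffusers_conf, transformers_conf):
    class_name = probe_class_name(conf)
    if class_name and (model_type := CLASS2TYPE.get(class_name)):
        print(class_name, "->", model_type)
```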