Mirror of https://github.com/invoke-ai/InvokeAI
Switch the CLIP-L starter model to use our hosted version, which is much smaller.
commit 83f82c5ddf
parent 101de8c25d
@@ -90,9 +90,9 @@ class ClipCheckpointModel(ModelLoader):
         match submodel_type:
             case SubModelType.Tokenizer:
-                return CLIPTokenizer.from_pretrained(config.path)
+                return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
             case SubModelType.TextEncoder:
-                return CLIPTextModel.from_pretrained(config.path)
+                return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")
 
         raise ValueError(
             f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
         )
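The loader change matters because, judging by the patched paths, the hosted repo keeps the tokenizer and text-encoder files in "tokenizer/" and "text_encoder/" subfolders rather than at the repo root. As a rough standalone sketch (not the InvokeAI loader itself; the local path below is a hypothetical example), the two patched branches are equivalent to:

# Standalone sketch of the patched loading branches. Assumes a local model
# directory laid out like the hosted repo, with "tokenizer/" and
# "text_encoder/" subfolders; the path below is a hypothetical example.
from pathlib import Path

from transformers import CLIPTextModel, CLIPTokenizer

config_path = Path("/models/clip-vit-large-patch14-text-encoder")  # hypothetical

# SubModelType.Tokenizer: vocab, merges, and tokenizer config live under "tokenizer/".
tokenizer = CLIPTokenizer.from_pretrained(config_path / "tokenizer")

# SubModelType.TextEncoder: weights and model config live under "text_encoder/".
text_encoder = CLIPTextModel.from_pretrained(config_path / "text_encoder")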
@@ -72,8 +72,8 @@ t5_8b_quantized_encoder = StarterModel(
 clip_l_encoder = StarterModel(
     name="clip-vit-large-patch14",
     base=BaseModelType.Any,
-    source="openai/clip-vit-large-patch14",
-    description="CLIP-L text encoder (used in FLUX pipelines). ~3GB",
+    source="InvokeAI/clip-vit-large-patch14-text-encoder::bfloat16",
+    description="CLIP-L text encoder (used in FLUX pipelines). ~250MB",
     type=ModelType.CLIPEmbed,
 )
 
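The ~3GB to ~250MB drop comes from two things: the hosted repo contains only CLIP-L's text encoder (no vision tower), and the "::bfloat16" suffix, which appears to be InvokeAI's syntax for selecting a precision variant of the source repo, halves the per-parameter storage versus fp32. A back-of-the-envelope check, as a sketch only (the repo id and subfolder layout are inferred from the diff, and torch_dtype stands in for whatever the "::bfloat16" selector does internally):

# Back-of-the-envelope size check for the text-encoder-only repo.
# Assumptions: the repo id comes from the diff; the "text_encoder"
# subfolder is inferred from the loader change above; requesting
# bfloat16 via torch_dtype approximates the "::bfloat16" selector.
import torch
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "InvokeAI/clip-vit-large-patch14-text-encoder",
    subfolder="text_encoder",
    torch_dtype=torch.bfloat16,
)

# CLIP-L's text encoder has roughly 123M parameters; at 2 bytes per
# bfloat16 parameter that is ~246MB, matching the ~250MB description.
num_params = sum(p.numel() for p in text_encoder.parameters())
print(f"{num_params / 1e6:.0f}M params, ~{num_params * 2 / 1e6:.0f}MB in bf16")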