Switch the CLIP-L starter model to use our hosted version, which is much smaller.

Ryan Dick 2024-08-23 19:14:23 +00:00 committed by Brandon
parent 101de8c25d
commit 83f82c5ddf
2 changed files with 4 additions and 4 deletions

@@ -90,9 +90,9 @@ class ClipCheckpointModel(ModelLoader):
         match submodel_type:
             case SubModelType.Tokenizer:
-                return CLIPTokenizer.from_pretrained(config.path)
+                return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
             case SubModelType.TextEncoder:
-                return CLIPTextModel.from_pretrained(config.path)
+                return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")
         raise ValueError(
             f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"

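For context, the loader now expects each submodel in its own subfolder of the downloaded model directory rather than at the repo root. A minimal standalone sketch of the equivalent loading, assuming a hypothetical local download at ./models/clip-vit-large-patch14-text-encoder laid out with "tokenizer/" and "text_encoder/" subfolders (this path and layout are illustrative, not part of the commit):

    # Sketch only: assumes a local directory with Diffusers-style subfolders.
    from pathlib import Path

    from transformers import CLIPTextModel, CLIPTokenizer

    model_dir = Path("./models/clip-vit-large-patch14-text-encoder")  # hypothetical path

    # Each submodel now lives in its own subfolder of the model directory.
    tokenizer = CLIPTokenizer.from_pretrained(model_dir / "tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(model_dir / "text_encoder")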

@@ -72,8 +72,8 @@ t5_8b_quantized_encoder = StarterModel(
 clip_l_encoder = StarterModel(
     name="clip-vit-large-patch14",
     base=BaseModelType.Any,
-    source="openai/clip-vit-large-patch14",
-    description="CLIP-L text encoder (used in FLUX pipelines). ~3GB",
+    source="InvokeAI/clip-vit-large-patch14-text-encoder::bfloat16",
+    description="CLIP-L text encoder (used in FLUX pipelines). ~250MB",
     type=ModelType.CLIPEmbed,
 )
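The ~250MB figure is consistent with the CLIP-L text encoder's parameter count in bfloat16 (2 bytes per parameter), versus the full upstream repo the old source pointed at. A rough sanity-check sketch, not part of the commit; it loads the text encoder from the upstream openai/clip-vit-large-patch14 repo and prints its footprint:

    # Sketch only: estimates the text encoder's in-memory size in bfloat16.
    import torch
    from transformers import CLIPTextModel

    text_encoder = CLIPTextModel.from_pretrained(
        "openai/clip-vit-large-patch14",
        torch_dtype=torch.bfloat16,  # 2 bytes per parameter
    )

    num_params = sum(p.numel() for p in text_encoder.parameters())
    size_mb = num_params * 2 / (1024**2)
    # Expect on the order of 123M parameters, i.e. roughly 250 MB.
    print(f"{num_params / 1e6:.0f}M parameters ≈ {size_mb:.0f} MB")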