diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index d4ba7efeda..4850b9670d 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -9,6 +9,7 @@ from compel.prompt_parser import (Blend, Conjunction,
                                   FlattenedPrompt, Fragment)
 from pydantic import BaseModel, Field
 
+from ...backend.model_management.models import ModelNotFoundException
 from ...backend.model_management import BaseModelType, ModelType, SubModelType
 from ...backend.model_management.lora import ModelPatcher
 from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
@@ -86,10 +87,10 @@ class CompelInvocation(BaseInvocation):
                         model_type=ModelType.TextualInversion,
                     ).context.model
                 )
-            except Exception:
+            except ModelNotFoundException:
                 # print(e)
                 #import traceback
-                # print(traceback.format_exc())
+                #print(traceback.format_exc())
                 print(f"Warn: trigger: \"{trigger}\" not found")
 
         with ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),\
diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py
index 5d27555ab3..ae576e39d9 100644
--- a/invokeai/backend/model_management/lora.py
+++ b/invokeai/backend/model_management/lora.py
@@ -655,6 +655,9 @@ class TextualInversionModel:
         else:
             result.embedding = next(iter(state_dict.values()))
 
+            if len(result.embedding.shape) == 1:
+                result.embedding = result.embedding.unsqueeze(0)
+
         if not isinstance(result.embedding, torch.Tensor):
             raise ValueError(f"Invalid embeddings file: {file_path.name}")
 
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 8002ec9ba4..f15dcfac3c 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -249,7 +249,7 @@ from .model_cache import ModelCache, ModelLocker
 from .models import (
     BaseModelType, ModelType, SubModelType,
     ModelError, SchedulerPredictionType, MODEL_CLASSES,
-    ModelConfigBase,
+    ModelConfigBase, ModelNotFoundException,
     )
 
 # We are only starting to number the config file with release 3.
@@ -409,7 +409,7 @@ class ModelManager(object):
         if model_key not in self.models:
             self.scan_models_directory(base_model=base_model, model_type=model_type)
             if model_key not in self.models:
-                raise Exception(f"Model not found - {model_key}")
+                raise ModelNotFoundException(f"Model not found - {model_key}")
 
         model_config = self.models[model_key]
         model_path = self.app_config.root_path / model_config.path
@@ -421,7 +421,7 @@ class ModelManager(object):
 
             else:
                 self.models.pop(model_key, None)
-                raise Exception(f"Model not found - {model_key}")
+                raise ModelNotFoundException(f"Model not found - {model_key}")
 
         # vae/movq override
         # TODO:
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 87b0ad3c4e..00630eef62 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from pydantic import BaseModel
 from typing import Literal, get_origin
-from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings
+from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
 from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .vae import VaeModel
 from .lora import LoRAModel
diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index afa62b2e4f..57c02bce76 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -15,6 +15,9 @@ from contextlib import suppress
 from pydantic import BaseModel, Field
 from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
 
+class ModelNotFoundException(Exception):
+    pass
+
 class BaseModelType(str, Enum):
     StableDiffusion1 = "sd-1"
     StableDiffusion2 = "sd-2"
diff --git a/invokeai/backend/model_management/models/textual_inversion.py b/invokeai/backend/model_management/models/textual_inversion.py
index 9a032218f0..4dcdbb24ba 100644
--- a/invokeai/backend/model_management/models/textual_inversion.py
+++ b/invokeai/backend/model_management/models/textual_inversion.py
@@ -8,6 +8,7 @@ from .base import (
     ModelType,
     SubModelType,
     classproperty,
+    ModelNotFoundException,
 )
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
@@ -37,8 +38,15 @@ class TextualInversionModel(ModelBase):
         if child_type is not None:
             raise Exception("There is no child models in textual inversion")
 
+        checkpoint_path = self.model_path
+        if os.path.isdir(checkpoint_path):
+            checkpoint_path = os.path.join(checkpoint_path, "learned_embeds.bin")
+
+        if not os.path.exists(checkpoint_path):
+            raise ModelNotFoundException()
+
         model = TextualInversionModelRaw.from_checkpoint(
-            file_path=self.model_path,
+            file_path=checkpoint_path,
             dtype=torch_dtype,
         )
 
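For reference, the behaviour introduced by this patch can be summarised in a small standalone sketch. The helper names `resolve_embedding_path` and `try_load_trigger` below are illustrative only, not part of the InvokeAI API: a missing textual-inversion model raises the new `ModelNotFoundException`, diffusers-style embedding folders are resolved to their `learned_embeds.bin`, and only that specific failure is downgraded to a warning in the Compel invocation.

```python
# Standalone sketch of the error-handling path added by this patch.
# resolve_embedding_path / try_load_trigger are illustrative names, not InvokeAI API.
import os


class ModelNotFoundException(Exception):
    """Specific exception so callers can distinguish 'model missing' from real errors."""


def resolve_embedding_path(model_path: str) -> str:
    # Diffusers-format textual inversions are folders containing learned_embeds.bin.
    path = model_path
    if os.path.isdir(path):
        path = os.path.join(path, "learned_embeds.bin")
    if not os.path.exists(path):
        raise ModelNotFoundException(f"Model not found - {model_path}")
    return path


def try_load_trigger(trigger: str, model_path: str):
    # Mirrors the compel.py change: only a missing model becomes a warning;
    # any other exception still propagates instead of being silently swallowed.
    try:
        return resolve_embedding_path(model_path)
    except ModelNotFoundException:
        print(f'Warn: trigger: "{trigger}" not found')
        return None
```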
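The `unsqueeze` added in `lora.py` handles diffusers-format `.bin` embeddings that store a single vector as a 1-D tensor; normalising it to `(num_vectors, embedding_dim)` keeps the rest of the textual-inversion code path unchanged. A minimal illustration (assumes `torch` is installed; the tensor here is made up):

```python
import torch

# A single-vector embedding saved by diffusers can come back as shape (768,).
embedding = torch.zeros(768)

# Same normalisation as the patch: promote 1-D tensors to (num_vectors, embedding_dim).
if len(embedding.shape) == 1:
    embedding = embedding.unsqueeze(0)

assert embedding.shape == (1, 768)
```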