diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index d4ba7efeda..4850b9670d 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -9,6 +9,7 @@ from compel.prompt_parser import (Blend, Conjunction,
                                   FlattenedPrompt, Fragment)
 from pydantic import BaseModel, Field
 
+from ...backend.model_management.models import ModelNotFoundException
 from ...backend.model_management import BaseModelType, ModelType, SubModelType
 from ...backend.model_management.lora import ModelPatcher
 from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
@@ -86,10 +87,10 @@ class CompelInvocation(BaseInvocation):
                             model_type=ModelType.TextualInversion,
                         ).context.model
                     )
-                except Exception:
+                except ModelNotFoundException:
                     # print(e)
                     #import traceback
-                    # print(traceback.format_exc())
+                    #print(traceback.format_exc())
                     print(f"Warn: trigger: \"{trigger}\" not found")
 
         with ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),\
diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py
index fb3d964c7b..b32890f6b7 100644
--- a/invokeai/backend/install/migrate_to_3.py
+++ b/invokeai/backend/install/migrate_to_3.py
@@ -223,11 +223,11 @@ class MigrateTo3(object):
         repo_id = 'openai/clip-vit-large-patch14'
         self._migrate_pretrained(CLIPTokenizer,
                                  repo_id= repo_id,
-                                 dest= target_dir / 'clip-vit-large-patch14' / 'tokenizer',
+                                 dest= target_dir / 'clip-vit-large-patch14',
                                  **kwargs)
         self._migrate_pretrained(CLIPTextModel,
                                  repo_id = repo_id,
-                                 dest = target_dir / 'clip-vit-large-patch14' / 'text_encoder',
+                                 dest = target_dir / 'clip-vit-large-patch14',
                                  **kwargs)
 
         # sd-2
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 1eeee92fb7..e3e64940de 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -29,7 +29,7 @@ import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
 
 from .model_manager import ModelManager
-from .model_cache import ModelCache
+from picklescan.scanner import scan_file_path
 from .models import BaseModelType, ModelVariantType
 
 try:
@@ -1014,7 +1014,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         checkpoint = load_file(checkpoint_path)
     else:
         if scan_needed:
-            ModelCache.scan_model(checkpoint_path, checkpoint_path)
+            # scan model
+            scan_result = scan_file_path(checkpoint_path)
+            if scan_result.infected_files != 0:
+                raise Exception(f"The model {checkpoint_path} is potentially infected by malware. Aborting import.")
         checkpoint = torch.load(checkpoint_path)
 
     # sometimes there is a state_dict key and sometimes not
diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py
index 5d27555ab3..ae576e39d9 100644
--- a/invokeai/backend/model_management/lora.py
+++ b/invokeai/backend/model_management/lora.py
@@ -655,6 +655,9 @@ class TextualInversionModel:
         else:
             result.embedding = next(iter(state_dict.values()))
 
+            if len(result.embedding.shape) == 1:
+                result.embedding = result.embedding.unsqueeze(0)
+
         if not isinstance(result.embedding, torch.Tensor):
             raise ValueError(f"Invalid embeddings file: {file_path.name}")
 
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 8002ec9ba4..f15dcfac3c 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -249,7 +249,7 @@ from .model_cache import ModelCache, ModelLocker
 from .models import (
     BaseModelType, ModelType, SubModelType,
     ModelError, SchedulerPredictionType, MODEL_CLASSES,
-    ModelConfigBase,
+    ModelConfigBase, ModelNotFoundException,
 )
 
 # We are only starting to number the config file with release 3.
@@ -409,7 +409,7 @@ class ModelManager(object):
         if model_key not in self.models:
             self.scan_models_directory(base_model=base_model, model_type=model_type)
             if model_key not in self.models:
-                raise Exception(f"Model not found - {model_key}")
+                raise ModelNotFoundException(f"Model not found - {model_key}")
 
         model_config = self.models[model_key]
         model_path = self.app_config.root_path / model_config.path
@@ -421,7 +421,7 @@ class ModelManager(object):
             else:
                 self.models.pop(model_key, None)
-                raise Exception(f"Model not found - {model_key}")
+                raise ModelNotFoundException(f"Model not found - {model_key}")
 
         # vae/movq override
         # TODO:
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 87b0ad3c4e..00630eef62 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from pydantic import BaseModel
 from typing import Literal, get_origin
-from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings
+from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
 from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .vae import VaeModel
 from .lora import LoRAModel
diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index afa62b2e4f..57c02bce76 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -15,6 +15,9 @@ from contextlib import suppress
 from pydantic import BaseModel, Field
 from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
 
+class ModelNotFoundException(Exception):
+    pass
+
 class BaseModelType(str, Enum):
     StableDiffusion1 = "sd-1"
     StableDiffusion2 = "sd-2"
diff --git a/invokeai/backend/model_management/models/textual_inversion.py b/invokeai/backend/model_management/models/textual_inversion.py
index 9a032218f0..4dcdbb24ba 100644
--- a/invokeai/backend/model_management/models/textual_inversion.py
+++ b/invokeai/backend/model_management/models/textual_inversion.py
@@ -8,6 +8,7 @@ from .base import (
     ModelType,
     SubModelType,
     classproperty,
+    ModelNotFoundException,
 )
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
@@ -37,8 +38,15 @@ class TextualInversionModel(ModelBase):
         if child_type is not None:
             raise Exception("There is no child models in textual inversion")
 
+        checkpoint_path = self.model_path
+        if os.path.isdir(checkpoint_path):
+            checkpoint_path = os.path.join(checkpoint_path, "learned_embeds.bin")
+
+        if not os.path.exists(checkpoint_path):
+            raise ModelNotFoundException()
+
         model = TextualInversionModelRaw.from_checkpoint(
-            file_path=self.model_path,
+            file_path=checkpoint_path,
             dtype=torch_dtype,
         )
diff --git a/invokeai/frontend/web/src/features/imageDeletion/components/DeleteImageModal.tsx b/invokeai/frontend/web/src/features/imageDeletion/components/DeleteImageModal.tsx
index cdc8257488..8306437cc7 100644
--- a/invokeai/frontend/web/src/features/imageDeletion/components/DeleteImageModal.tsx
+++ b/invokeai/frontend/web/src/features/imageDeletion/components/DeleteImageModal.tsx
@@ -23,6 +23,7 @@ import { stateSelector } from 'app/store/store';
 import {
   imageDeletionConfirmed,
   imageToDeleteCleared,
+  isModalOpenChanged,
   selectImageUsage,
 } from '../store/imageDeletionSlice';
 
@@ -63,6 +64,7 @@ const DeleteImageModal = () => {
 
   const handleClose = useCallback(() => {
     dispatch(imageToDeleteCleared());
+    dispatch(isModalOpenChanged(false));
   }, [dispatch]);
 
   const handleDelete = useCallback(() => {
diff --git a/invokeai/frontend/web/src/features/imageDeletion/store/imageDeletionSlice.ts b/invokeai/frontend/web/src/features/imageDeletion/store/imageDeletionSlice.ts
index 0daffba0d7..49630bcdb4 100644
--- a/invokeai/frontend/web/src/features/imageDeletion/store/imageDeletionSlice.ts
+++ b/invokeai/frontend/web/src/features/imageDeletion/store/imageDeletionSlice.ts
@@ -31,6 +31,7 @@ const imageDeletion = createSlice({
     },
     imageToDeleteCleared: (state) => {
       state.imageToDelete = null;
+      state.isModalOpen = false;
     },
   },
 });
diff --git a/invokeai/frontend/web/src/theme/components/textarea.ts b/invokeai/frontend/web/src/theme/components/textarea.ts
index 85e6e37d3f..b737cf5e57 100644
--- a/invokeai/frontend/web/src/theme/components/textarea.ts
+++ b/invokeai/frontend/web/src/theme/components/textarea.ts
@@ -1,7 +1,28 @@
 import { defineStyle, defineStyleConfig } from '@chakra-ui/react';
 import { getInputOutlineStyles } from '../util/getInputOutlineStyles';
 
-const invokeAI = defineStyle((props) => getInputOutlineStyles(props));
+const invokeAI = defineStyle((props) => ({
+  ...getInputOutlineStyles(props),
+  '::-webkit-scrollbar': {
+    display: 'initial',
+  },
+  '::-webkit-resizer': {
+    backgroundImage: `linear-gradient(135deg,
+      var(--invokeai-colors-base-50) 0%,
+      var(--invokeai-colors-base-50) 70%,
+      var(--invokeai-colors-base-200) 70%,
+      var(--invokeai-colors-base-200) 100%)`,
+  },
+  _dark: {
+    '::-webkit-resizer': {
+      backgroundImage: `linear-gradient(135deg,
+        var(--invokeai-colors-base-900) 0%,
+        var(--invokeai-colors-base-900) 70%,
+        var(--invokeai-colors-base-800) 70%,
+        var(--invokeai-colors-base-800) 100%)`,
+    },
+  },
+}));
 
 export const textareaTheme = defineStyleConfig({
   variants: {
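
Note on the scan-before-load change in convert_ckpt_to_diffusers.py: picklescan's scan_file_path returns a result whose infected_files count is checked before the checkpoint is handed to torch.load. Below is a minimal standalone sketch of that pattern under the same assumptions as the patch (picklescan installed); the checkpoint path is a hypothetical example, not a file from this repo.

    # Sketch: refuse to torch.load() a pickled checkpoint that picklescan flags.
    # The path below is a hypothetical example.
    import torch
    from picklescan.scanner import scan_file_path

    checkpoint_path = "models/example.ckpt"  # hypothetical path

    # scan_file_path() unpickles nothing; it statically inspects the pickle
    # opcodes and reports suspicious imports via infected_files.
    scan_result = scan_file_path(checkpoint_path)
    if scan_result.infected_files != 0:
        raise Exception(f"The model {checkpoint_path} is potentially infected by malware. Aborting import.")

    # Only load once the scan comes back clean.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

The companion change is the dedicated ModelNotFoundException in models/base.py: raising it from ModelManager.get_model and TextualInversionModel lets callers such as CompelInvocation catch exactly the missing-model case instead of swallowing every Exception.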