diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py
index 64ffb248d2..84183d8a06 100644
--- a/invokeai/app/invocations/ip_adapter.py
+++ b/invokeai/app/invocations/ip_adapter.py
@@ -1,5 +1,3 @@
-from typing import Literal
-
 from pydantic import BaseModel, Field
 
 from invokeai.app.invocations.baseinvocation import (
diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py
index 31ee815eae..d3bbfb6b37 100644
--- a/invokeai/backend/ip_adapter/ip_adapter.py
+++ b/invokeai/backend/ip_adapter/ip_adapter.py
@@ -1,7 +1,6 @@
 # copied from https://github.com/tencent-ailab/IP-Adapter (Apache License 2.0)
 # and modified as needed
 
-import os
 from contextlib import contextmanager
 from typing import Optional, Union
 
@@ -18,8 +17,6 @@ from diffusers.models import UNet2DConditionModel
 from PIL import Image
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 
-from invokeai.backend.model_management.models.base import calc_model_size_by_data
-
 from .attention_processor import AttnProcessor, IPAttnProcessor
 from .resampler import Resampler
 
diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index d9227fb722..046f596c1e 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -8,7 +8,6 @@ import torch
 from diffusers import ConfigMixin, ModelMixin
 from picklescan.scanner import scan_file_path
 
-from invokeai.backend.model_management.models import BaseModelType
 from invokeai.backend.model_management.models.ip_adapter import IPAdapterModelFormat
 
 from .models import (
@@ -512,7 +511,9 @@ class ControlNetFolderProbe(FolderProbeBase):
             else (
                 BaseModelType.StableDiffusion2
                 if dimension == 1024
-                else BaseModelType.StableDiffusionXL if dimension == 2048 else None
+                else BaseModelType.StableDiffusionXL
+                if dimension == 2048
+                else None
             )
         )
         if not base_model:
diff --git a/invokeai/backend/model_management/models/ip_adapter.py b/invokeai/backend/model_management/models/ip_adapter.py
index 976711c720..70f42ec2a9 100644
--- a/invokeai/backend/model_management/models/ip_adapter.py
+++ b/invokeai/backend/model_management/models/ip_adapter.py
@@ -1,7 +1,7 @@
 import os
 import typing
 from enum import Enum
-from typing import Any, Literal, Optional
+from typing import Literal, Optional
 
 import torch
 
@@ -17,7 +17,6 @@ from invokeai.backend.model_management.models.base import (
     ModelConfigBase,
     ModelType,
     SubModelType,
-    calc_model_size_by_fs,
     classproperty,
 )
 
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index dc903ef8ca..fe93d52ff9 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -26,10 +26,9 @@ from pydantic import Field
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
+from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
     ConditioningData,
-    IPAdapterConditioningInfo,
 )
 
 from ..util import auto_detect_slice_size, normalize_device