diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py
index 6376585fe5..569428a9e7 100644
--- a/invokeai/backend/model_management/__init__.py
+++ b/invokeai/backend/model_management/__init__.py
@@ -1,15 +1,19 @@
 """
 Initialization file for invokeai.backend.model_management
 """
-from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType  # noqa: F401
-from .model_cache import ModelCache  # noqa: F401
+# This import must be first
+from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType  # noqa: F401 isort: split
+
 from .lora import ModelPatcher, ONNXModelPatcher  # noqa: F401
+from .model_cache import ModelCache  # noqa: F401
 from .models import (  # noqa: F401
     BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelVariantType,
-    ModelNotFoundException,
     DuplicateModelException,
+    ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
 )
-from .model_merge import ModelMerger, MergeInterpolationMethod  # noqa: F401
+
+# This import must be last
+from .model_merge import ModelMerger, MergeInterpolationMethod  # noqa: F401 isort: split
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 8118e28abb..69d32a49c7 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -25,12 +25,7 @@ from typing import Optional, Union
 
 import requests
 import torch
-from diffusers.models import (
-    AutoencoderKL,
-    ControlNetModel,
-    PriorTransformer,
-    UNet2DConditionModel,
-)
+from diffusers.models import AutoencoderKL, ControlNetModel, PriorTransformer, UNet2DConditionModel
 from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
 from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
@@ -64,6 +59,7 @@ from transformers import (
 
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.util.logging import InvokeAILogger
+
 from .models import BaseModelType, ModelVariantType
 
 try:
@@ -1203,8 +1199,8 @@ def download_from_original_stable_diffusion_ckpt(
         StableDiffusionControlNetPipeline,
         StableDiffusionInpaintPipeline,
         StableDiffusionPipeline,
-        StableDiffusionXLPipeline,
         StableDiffusionXLImg2ImgPipeline,
+        StableDiffusionXLPipeline,
         StableUnCLIPImg2ImgPipeline,
         StableUnCLIPPipeline,
     )
diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py
index d0d8d4226c..bb44455c88 100644
--- a/invokeai/backend/model_management/lora.py
+++ b/invokeai/backend/model_management/lora.py
@@ -2,8 +2,8 @@ from __future__ import annotations
 
 import copy
 from contextlib import contextmanager
-from typing import Optional, Dict, Tuple, Any, Union, List
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -14,7 +14,6 @@ from transformers import CLIPTextModel, CLIPTokenizer
 
 from .models.lora import LoRAModel
 
-
 """
 loras = [
     (lora_model1, 0.7),
@@ -307,9 +306,10 @@ class TextualInversionManager(BaseTextualInversionManager):
 
 
 class ONNXModelPatcher:
-    from .models.base import IAIOnnxRuntimeModel
     from diffusers import OnnxRuntimeModel
 
+    from .models.base import IAIOnnxRuntimeModel
+
     @classmethod
     @contextmanager
     def apply_lora_unet(
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 6f3e5bd6a5..6d0f36ad8c 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -17,18 +17,19 @@ context. Use like this:
 """
 
 import gc
+import hashlib
 import os
 import sys
-import hashlib
 from contextlib import suppress
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Union, types, Optional, Type, Any
+from typing import Any, Dict, Optional, Type, Union, types
 
 import torch
 
 import invokeai.backend.util.logging as logger
-from .models import BaseModelType, ModelType, SubModelType, ModelBase
+
+from .models import BaseModelType, ModelBase, ModelType, SubModelType
 
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index d87bc03fb7..e39ed6bf61 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -234,8 +234,8 @@ import textwrap
 import types
 from dataclasses import dataclass
 from pathlib import Path
-from shutil import rmtree, move
-from typing import Optional, List, Literal, Tuple, Union, Dict, Set, Callable
+from shutil import move, rmtree
+from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union
 
 import torch
 import yaml
@@ -246,20 +246,21 @@ from pydantic import BaseModel, Field
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.util import CUDA_DEVICE, Chdir
+
 from .model_cache import ModelCache, ModelLocker
 from .model_search import ModelSearch
 from .models import (
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelError,
-    SchedulerPredictionType,
     MODEL_CLASSES,
-    ModelConfigBase,
-    ModelNotFoundException,
-    InvalidModelException,
+    BaseModelType,
     DuplicateModelException,
+    InvalidModelException,
     ModelBase,
+    ModelConfigBase,
+    ModelError,
+    ModelNotFoundException,
+    ModelType,
+    SchedulerPredictionType,
+    SubModelType,
 )
 
 # We are only starting to number the config file with release 3.
diff --git a/invokeai/backend/model_management/model_merge.py b/invokeai/backend/model_management/model_merge.py
index a34d9b0e3e..59201d64d9 100644
--- a/invokeai/backend/model_management/model_merge.py
+++ b/invokeai/backend/model_management/model_merge.py
@@ -9,13 +9,14 @@ Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
 
 import warnings
 from enum import Enum
 from pathlib import Path
+from typing import List, Optional, Union
+
 from diffusers import DiffusionPipeline
 from diffusers import logging as dlogging
-from typing import List, Union, Optional
 
 import invokeai.backend.util.logging as logger
-from ...backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
+from ...backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
 
 
 class MergeInterpolationMethod(str, Enum):
diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index 0d6f61e145..b8341b4499 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -1,24 +1,23 @@
 import json
-import torch
-import safetensors.torch
-
 from dataclasses import dataclass
-
-from diffusers import ModelMixin, ConfigMixin
 from pathlib import Path
-from typing import Callable, Literal, Union, Dict, Optional
+from typing import Callable, Dict, Literal, Optional, Union
+
+import safetensors.torch
+import torch
+from diffusers import ConfigMixin, ModelMixin
 from picklescan.scanner import scan_file_path
 
 from .models import (
     BaseModelType,
+    InvalidModelException,
     ModelType,
     ModelVariantType,
     SchedulerPredictionType,
     SilenceWarnings,
-    InvalidModelException,
 )
-from .util import lora_token_vector_length
 from .models.base import read_checkpoint_meta
+from .util import lora_token_vector_length
 
 
 @dataclass
diff --git a/invokeai/backend/model_management/model_search.py b/invokeai/backend/model_management/model_search.py
index 0a98091f4a..f4dd8b7739 100644
--- a/invokeai/backend/model_management/model_search.py
+++ b/invokeai/backend/model_management/model_search.py
@@ -5,8 +5,8 @@ Abstract base class for recursive directory search for models.
 
 import os
 from abc import ABC, abstractmethod
-from typing import List, Set, types
 from pathlib import Path
+from typing import List, Set, types
 
 import invokeai.backend.util.logging as logger
 