Apply ruff rule to disallow all relative imports.

Ryan Dick
2024-07-03 12:20:35 -04:00
parent 9da5925287
commit 1d449097cc
76 changed files with 221 additions and 274 deletions
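The commit title names the rule only generically. The usual way to ban all relative imports with ruff (an assumed configuration; the pyproject.toml change itself is not among the hunks shown) is flake8-tidy-imports rule TID252 with ban-relative-imports set to "all":

    # pyproject.toml -- assumed sketch; the actual config change is not shown below
    [tool.ruff.lint]
    extend-select = ["TID252"]  # flake8-tidy-imports: relative-imports

    [tool.ruff.lint.flake8-tidy-imports]
    ban-relative-imports = "all"  # the default, "parents", bans only `..`-style imports

TID252 has an automatic fix, so `ruff check --fix` can rewrite each flagged import to its absolute form, which is exactly the shape of the hunks below.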

@@ -1,6 +1,6 @@
 """Re-export frequently-used symbols from the Model Manager backend."""
 
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModel,
     AnyModelConfig,
     BaseModelType,
@@ -13,9 +13,9 @@ from .config import (
     SchedulerPredictionType,
     SubModelType,
 )
-from .load import LoadedModel
-from .probe import ModelProbe
-from .search import ModelSearch
+from invokeai.backend.model_manager.load import LoadedModel
+from invokeai.backend.model_manager.probe import ModelProbe
+from invokeai.backend.model_manager.search import ModelSearch
 
 __all__ = [
     "AnyModel",

@@ -6,10 +6,10 @@ Init file for the model loader.
 from importlib import import_module
 from pathlib import Path
 
-from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
-from .load_default import ModelLoader
-from .model_cache.model_cache_default import ModelCache
-from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
+from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
 
 # This registers the subclasses that implement loaders of specific model types
 loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"]
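The hunk ends at the stem list. Presumably (an assumption; the loop sits below the cut-off) the file then imports each discovered module so that its @ModelLoaderRegistry.register decorators execute:

    for module in loaders:
        import_module(f"{__package__}.model_loaders.{module}")

Note that this runtime use of __package__ stays package-relative even after the refactor; ruff's static check only sees literal `from`/`import` statements.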

@@ -29,13 +29,17 @@ import torch
 from invokeai.backend.model_manager import AnyModel, SubModelType
 from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    CacheStats,
+    ModelCacheBase,
+    ModelLockerBase,
+)
+from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-
-from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase
-from .model_locker import ModelLocker
 
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0
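A back-of-envelope check on the 6.0 default (the per-model figure is an estimate, not taken from this diff):

    DEFAULT_MAX_CACHE_SIZE = 6.0  # GB, from the hunk above
    EST_FP16_SD1_PIPELINE_GB = 2.0  # assumption: rough fp16 SD1.x pipeline footprint
    print(DEFAULT_MAX_CACHE_SIZE // EST_FP16_SD1_PIPELINE_GB)  # -> 3.0 models resident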

@@ -7,8 +7,11 @@ from typing import Dict, Optional
 import torch
 
 from invokeai.backend.model_manager import AnyModel
-
-from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    ModelCacheBase,
+    ModelLockerBase,
+)
 
 
 class ModelLocker(ModelLockerBase):

@@ -26,8 +26,7 @@ from invokeai.backend.model_manager.config import (
     ModelType,
     SubModelType,
 )
-
-from . import ModelLoaderBase
+from invokeai.backend.model_manager.load import ModelLoaderBase
 
 
 class ModelLoaderRegistryBase(ABC):

@@ -14,8 +14,7 @@ from invokeai.backend.model_manager import (
 )
 from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers)
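This and the following loader hunks rely on ModelLoaderRegistry.register acting as a parameterized class decorator. A minimal sketch of that pattern (only the call shape comes from the diff; the name Registry, the storage dict, and the str key types are assumptions):

    from typing import Callable, Dict, Tuple, Type

    class Registry:
        _registry: Dict[Tuple[str, str, str], Type] = {}

        @classmethod
        def register(cls, base: str, type: str, format: str) -> Callable[[Type], Type]:
            def decorator(loader_cls: Type) -> Type:
                # Key the class by (base, type, format), then return it
                # unchanged so the decorated class remains usable.
                cls._registry[(base, type, format)] = loader_cls
                return loader_cls
            return decorator

    @Registry.register(base="any", type="controlnet", format="diffusers")
    class DummyLoader: ...

Returning the class unchanged is what lets the same module both register a loader and still reference it by name.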

@@ -14,8 +14,7 @@ from invokeai.backend.model_manager import (
     SubModelType,
 )
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX)

@@ -26,10 +26,9 @@ from invokeai.backend.model_manager.config import (
     MainCheckpointConfig,
 )
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 from invokeai.backend.util.silence_warnings import SilenceWarnings
-
-from .generic_diffusers import GenericDiffusersLoader
 
 VARIANT_TO_IN_CHANNEL_MAP = {
     ModelVariantType.Normal: 4,
     ModelVariantType.Depth: 5,

@@ -13,8 +13,7 @@ from invokeai.backend.model_manager import (
 )
 from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers)

@@ -17,16 +17,10 @@ from diffusers.utils import logging as dlogging
 from invokeai.app.services.model_install import ModelInstallServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
+from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType
+from invokeai.backend.model_manager.config import MainDiffusersConfig
 from invokeai.backend.util.devices import TorchDevice
-
-from . import (
-    AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-)
-from .config import MainDiffusersConfig
 
 
 class MergeInterpolationMethod(str, Enum):
     WeightedSum = "weighted_sum"
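MergeInterpolationMethod.WeightedSum is only named in this hunk; its conventional semantics (an assumption based on the name, not on code shown here) are a convex blend of corresponding parameters:

    import torch

    def weighted_sum(theta_a: torch.Tensor, theta_b: torch.Tensor, alpha: float) -> torch.Tensor:
        # alpha=0.0 returns model A's weights, alpha=1.0 model B's.
        return (1.0 - alpha) * theta_a + alpha * theta_b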

@@ -16,8 +16,8 @@ data = HuggingFaceMetadataFetch().from_id("<REPO_ID>")
 assert isinstance(data, HuggingFaceMetadata)
 """
 
-from .fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
-from .metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     AnyModelRepoMetadataValidator,
     BaseMetadata,

@@ -10,7 +10,7 @@ data = HuggingFaceMetadataFetch().from_id("<repo_id>")
 assert isinstance(data, HuggingFaceMetadata)
 """
 
-from .fetch_base import ModelMetadataFetchBase
-from .huggingface import HuggingFaceMetadataFetch
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 
 __all__ = ["ModelMetadataFetchBase", "HuggingFaceMetadataFetch"]

@@ -25,6 +25,7 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 
 from invokeai.backend.model_manager.config import ModelRepoVariant
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
 from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     HuggingFaceMetadata,
@@ -32,8 +33,6 @@ from invokeai.backend.model_manager.metadata.metadata_base import (
     UnknownMetadataException,
 )
 
-from .fetch_base import ModelMetadataFetchBase
-
 HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"
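HF_MODEL_RE captures the owner/name repo id from a HuggingFace URL. A quick check (the example repo id is illustrative, not from the diff):

    import re

    HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"

    m = re.match(HF_MODEL_RE, "https://huggingface.co/stabilityai/sd-vae-ft-mse")
    assert m is not None and m.group(1) == "stabilityai/sd-vae-ft-mse"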

@@ -10,9 +10,7 @@ from picklescan.scanner import scan_file_path
 import invokeai.backend.util.logging as logger
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
-from invokeai.backend.util.silence_warnings import SilenceWarnings
-
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ControlAdapterDefaultSettings,
@@ -26,7 +24,8 @@ from .config import (
     ModelVariantType,
     SchedulerPredictionType,
 )
-from .util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.util.silence_warnings import SilenceWarnings
 
 CkptType = Dict[str | int, Any]