Merge branch 'main' into ryan/spandrel-upscale

Ryan Dick committed 2024-07-09 13:47:11 -04:00
139 changed files with 1046 additions and 1092 deletions

View File

@@ -1,6 +1,6 @@
 """Re-export frequently-used symbols from the Model Manager backend."""

-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModel,
     AnyModelConfig,
     BaseModelType,
@@ -13,9 +13,9 @@ from .config import (
     SchedulerPredictionType,
     SubModelType,
 )
-from .load import LoadedModel
-from .probe import ModelProbe
-from .search import ModelSearch
+from invokeai.backend.model_manager.load import LoadedModel
+from invokeai.backend.model_manager.probe import ModelProbe
+from invokeai.backend.model_manager.search import ModelSearch

 __all__ = [
     "AnyModel",

View File

@@ -30,11 +30,10 @@ from diffusers.models.modeling_utils import ModelMixin
 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter
 from typing_extensions import Annotated, Any, Dict

+from invokeai.app.invocations.constants import SCHEDULER_NAME_VALUES
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
-from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES

 # ModelMixin is the base class for all diffusers and transformers models
 # RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime
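
The two comments above describe the split that this hunk's imports reflect: loaded models are either diffusers/transformers classes deriving from ModelMixin, or InvokeAI's own RawModel wrappers. A minimal sketch of telling the two apart at runtime, using only the imports visible in this file; describe_model is a hypothetical helper, not part of the codebase:

import torch
from diffusers.models.modeling_utils import ModelMixin

from invokeai.backend.raw_model import RawModel


def describe_model(model: object) -> str:
    # Classify a loaded model by the base class it derives from.
    if isinstance(model, ModelMixin):
        return "diffusers/transformers model"
    if isinstance(model, RawModel):
        return "InvokeAI wrapper (ip_adapter, lora, textual_inversion, onnx)"
    if isinstance(model, torch.nn.Module):
        return "plain torch.nn.Module"
    return "unknown"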

View File

@@ -1,75 +0,0 @@
-import ctypes
-
-
-class Struct_mallinfo2(ctypes.Structure):
-    """A ctypes Structure that matches the libc mallinfo2 struct.
-
-    Docs:
-    - https://man7.org/linux/man-pages/man3/mallinfo.3.html
-    - https://www.gnu.org/software/libc/manual/html_node/Statistics-of-Malloc.html
-
-    struct mallinfo2 {
-        size_t arena;     /* Non-mmapped space allocated (bytes) */
-        size_t ordblks;   /* Number of free chunks */
-        size_t smblks;    /* Number of free fastbin blocks */
-        size_t hblks;     /* Number of mmapped regions */
-        size_t hblkhd;    /* Space allocated in mmapped regions (bytes) */
-        size_t usmblks;   /* See below */
-        size_t fsmblks;   /* Space in freed fastbin blocks (bytes) */
-        size_t uordblks;  /* Total allocated space (bytes) */
-        size_t fordblks;  /* Total free space (bytes) */
-        size_t keepcost;  /* Top-most, releasable space (bytes) */
-    };
-    """
-
-    _fields_ = [
-        ("arena", ctypes.c_size_t),
-        ("ordblks", ctypes.c_size_t),
-        ("smblks", ctypes.c_size_t),
-        ("hblks", ctypes.c_size_t),
-        ("hblkhd", ctypes.c_size_t),
-        ("usmblks", ctypes.c_size_t),
-        ("fsmblks", ctypes.c_size_t),
-        ("uordblks", ctypes.c_size_t),
-        ("fordblks", ctypes.c_size_t),
-        ("keepcost", ctypes.c_size_t),
-    ]
-
-    def __str__(self):
-        s = ""
-        s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
-        s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
-        s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks\n"
-        s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions\n"
-        s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
-        s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
-        s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
-        s += (
-            f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
-            " (GB)\n"
-        )
-        s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
-        s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
-        return s
-
-
-class LibcUtil:
-    """A utility class for interacting with the C Standard Library (`libc`) via ctypes.
-
-    Note that this class will raise on __init__() if 'libc.so.6' can't be found. Take care to handle environments where
-    this shared library is not available.
-
-    TODO: Improve cross-OS compatibility of this class.
-    """
-
-    def __init__(self):
-        self._libc = ctypes.cdll.LoadLibrary("libc.so.6")
-
-    def mallinfo2(self) -> Struct_mallinfo2:
-        """Calls `libc` `mallinfo2`.
-
-        Docs: https://man7.org/linux/man-pages/man3/mallinfo.3.html
-        """
-        mallinfo2 = self._libc.mallinfo2
-        mallinfo2.restype = Struct_mallinfo2
-        return mallinfo2()
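
For reference, a minimal usage sketch of the helper deleted above. Per its own docstring, LibcUtil raises on construction when libc.so.6 cannot be loaded (ctypes.cdll.LoadLibrary raises OSError), so non-glibc platforms need a guard:

try:
    info = LibcUtil().mallinfo2()
    print(info)  # per-field breakdown; byte counts formatted in GB
except OSError:
    print("libc.so.6 unavailable; malloc statistics cannot be collected")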

View File

@@ -6,10 +6,10 @@ Init file for the model loader.
 from importlib import import_module
 from pathlib import Path

-from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
-from .load_default import ModelLoader
-from .model_cache.model_cache_default import ModelCache
-from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
+from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase

 # This registers the subclasses that implement loaders of specific model types
 loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"]
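
The comment and glob above only discover the loader modules; the registration itself happens when each module is imported and its @ModelLoaderRegistry.register(...) decorators execute. A sketch of the likely follow-up line, with the exact subpackage path being an assumption:

for module in loaders:
    import_module(f"{__package__}.model_loaders.{module}")  # triggers the register() decorators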

View File

@@ -5,7 +5,7 @@ import psutil
 import torch
 from typing_extensions import Self

-from ..util.libc_util import LibcUtil, Struct_mallinfo2
+from invokeai.backend.model_manager.util.libc_util import LibcUtil, Struct_mallinfo2

 GB = 2**30  # 1 GB

View File

@@ -29,13 +29,17 @@ import torch
 from invokeai.backend.model_manager import AnyModel, SubModelType
 from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    CacheStats,
+    ModelCacheBase,
+    ModelLockerBase,
+)
+from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase
-from .model_locker import ModelLocker

 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0
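
A back-of-envelope check of the comment above, assuming an SD1.5-class checkpoint of roughly one billion parameters across UNet, VAE, and text encoder (the parameter count is an assumption):

params = 1.0e9   # approximate parameter count of one SD1.5-class model
fp16_bytes = 2   # bytes per parameter at fp16
per_model_gb = params * fp16_bytes / 2**30
print(f"~{per_model_gb:.1f} GB per model, ~{3 * per_model_gb:.1f} GB for three")  # ~1.9 GB / ~5.6 GB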

View File

@@ -7,8 +7,11 @@ from typing import Dict, Optional
 import torch

 from invokeai.backend.model_manager import AnyModel
-from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    ModelCacheBase,
+    ModelLockerBase,
+)

 class ModelLocker(ModelLockerBase):

View File

@@ -18,7 +18,7 @@ Use like this:
 from abc import ABC, abstractmethod
 from typing import Callable, Dict, Optional, Tuple, Type, TypeVar

-from ..config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ModelConfigBase,
@@ -26,7 +26,7 @@ from ..config import (
     ModelType,
     SubModelType,
 )
-from . import ModelLoaderBase
+from invokeai.backend.model_manager.load import ModelLoaderBase

 class ModelLoaderRegistryBase(ABC):
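
For orientation, a condensed sketch of the registry pattern this file's truncated "Use like this:" docstring refers to: loader classes self-register under a (base, type, format) key and are later looked up from a model config. The internal dict and simplified key types are assumptions, not the actual implementation:

from typing import Callable, Dict, Tuple, Type

_REGISTRY: Dict[Tuple[str, str, str], Type] = {}


def register(base: str, type: str, format: str) -> Callable[[Type], Type]:
    def decorator(cls: Type) -> Type:
        _REGISTRY[(base, type, format)] = cls  # one loader class per key
        return cls

    return decorator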

View File

@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers)

View File

@@ -18,8 +18,8 @@ from invokeai.backend.model_manager import (
     SubModelType,
 )
 from invokeai.backend.model_manager.config import DiffusersConfigBase
-from .. import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)

View File

@@ -15,9 +15,9 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
-from .. import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)

View File

@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX)

View File

@@ -25,11 +25,10 @@ from invokeai.backend.model_manager.config import (
     DiffusersConfigBase,
     MainCheckpointConfig,
 )
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader

 VARIANT_TO_IN_CHANNEL_MAP = {
     ModelVariantType.Normal: 4,
     ModelVariantType.Depth: 5,

View File

@@ -12,10 +12,10 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.textual_inversion import TextualInversionModelRaw
-from .. import ModelLoader, ModelLoaderRegistry

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFile)
 @ModelLoaderRegistry.register(

View File

@@ -12,9 +12,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader

 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers)

View File

@@ -39,7 +39,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
     else:
         # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the
         # supported model types.
-        logger.error(
+        logger.warning(
             f"Failed to calculate model size for unexpected model type: {type(model)}. The model will be treated as "
             "having size 0."
         )
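
This hunk downgrades the unknown-model-type path from logger.error to logger.warning: returning size 0 is a tolerated soft failure rather than a fault. For contrast, a sketch of how the size of an ordinary torch module can be computed from its parameters and buffers (the helper name is illustrative, not the function this file defines):

import torch


def module_size_bytes(module: torch.nn.Module) -> int:
    # Sum the in-memory footprint of all parameters and buffers.
    return sum(p.numel() * p.element_size() for p in module.parameters()) + sum(
        b.numel() * b.element_size() for b in module.buffers()
    )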

View File

@@ -17,16 +17,10 @@ from diffusers.utils import logging as dlogging
 from invokeai.app.services.model_install import ModelInstallServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
+from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType
+from invokeai.backend.model_manager.config import MainDiffusersConfig
 from invokeai.backend.util.devices import TorchDevice
-from . import (
-    AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-)
-from .config import MainDiffusersConfig

 class MergeInterpolationMethod(str, Enum):
     WeightedSum = "weighted_sum"
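
The WeightedSum member shown in context names the simplest interpolation method: a convex blend of corresponding weights. A minimal sketch of what it computes over two aligned state dicts (the helper and its alpha parameter are illustrative, not the merger's actual API):

from typing import Dict

import torch


def weighted_sum(a: Dict[str, torch.Tensor], b: Dict[str, torch.Tensor], alpha: float) -> Dict[str, torch.Tensor]:
    # result = (1 - alpha) * a + alpha * b, key by key
    return {key: torch.lerp(a[key], b[key], alpha) for key in a}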

View File

@@ -16,8 +16,8 @@ data = HuggingFaceMetadataFetch().from_id("<REPO_ID>")
 assert isinstance(data, HuggingFaceMetadata)
 """

-from .fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
-from .metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     AnyModelRepoMetadataValidator,
     BaseMetadata,

View File

@@ -10,7 +10,7 @@ data = HuggingFaceMetadataFetch().from_id("<repo_id>")
 assert isinstance(data, HuggingFaceMetadata)
 """

-from .fetch_base import ModelMetadataFetchBase
-from .huggingface import HuggingFaceMetadataFetch
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch

 __all__ = ["ModelMetadataFetchBase", "HuggingFaceMetadataFetch"]

View File

@@ -18,8 +18,11 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session

 from invokeai.backend.model_manager import ModelRepoVariant
-from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata
+from invokeai.backend.model_manager.metadata.metadata_base import (
+    AnyModelRepoMetadata,
+    AnyModelRepoMetadataValidator,
+    BaseMetadata,
+)

 class ModelMetadataFetchBase(ABC):

View File

@@ -25,14 +25,13 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session

 from invokeai.backend.model_manager.config import ModelRepoVariant
-from ..metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     HuggingFaceMetadata,
     RemoteModelFile,
     UnknownMetadataException,
 )
-from .fetch_base import ModelMetadataFetchBase

 HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"
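
A quick sanity check of the HF_MODEL_RE pattern shown in context: capture group 1 extracts the owner/name repo id from a Hugging Face model URL (the example repo id is illustrative):

import re

m = re.match(HF_MODEL_RE, "https://huggingface.co/stabilityai/sd-vae-ft-mse")
assert m is not None and m.group(1) == "stabilityai/sd-vae-ft-mse"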

View File

@@ -24,8 +24,7 @@ from requests.sessions import Session
 from typing_extensions import Annotated

 from invokeai.backend.model_manager import ModelRepoVariant
-from ..util import select_hf_files
+from invokeai.backend.model_manager.util.select_hf_files import filter_files

 class UnknownMetadataException(Exception):
@@ -112,9 +111,7 @@ class HuggingFaceMetadata(ModelMetadataWithFiles):
         session = session or Session()
         configure_http_backend(backend_factory=lambda: session)  # used in testing

-        paths = select_hf_files.filter_files(
-            [x.path for x in self.files], variant, subfolder
-        )  # all files in the model
+        paths = filter_files([x.path for x in self.files], variant, subfolder)  # all files in the model

         prefix = f"{subfolder}/" if subfolder else ""
         # the next step reads model_index.json to determine which subdirectories belong
         # to the model

View File

@@ -11,10 +11,7 @@ from picklescan.scanner import scan_file_path
 import invokeai.backend.util.logging as logger
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
-from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
-from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ControlAdapterDefaultSettings,
@@ -28,7 +25,9 @@ from .config import (
     ModelVariantType,
     SchedulerPredictionType,
 )
-from .util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
+from invokeai.backend.util.silence_warnings import SilenceWarnings

 CkptType = Dict[str | int, Any]

View File

@@ -17,7 +17,7 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Dict, List, Optional, Set

-from ..config import ModelRepoVariant
+from invokeai.backend.model_manager.config import ModelRepoVariant

 def filter_files(
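
The diff ends at the filter_files signature, so its full parameter list is not shown here. A hedged usage sketch mirroring the call site visible in metadata_base.py above (model_files, variant, and subfolder stand in for the caller's values):

paths = filter_files([f.path for f in model_files], variant, subfolder)  # paths matching the requested variant/subfolder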