Merge branch 'main' into ryan/spandrel-upscale

This commit is contained in:
Ryan Dick
2024-07-09 13:47:11 -04:00
139 changed files with 1046 additions and 1092 deletions


@@ -2,6 +2,11 @@
 Initialization file for invokeai.backend.image_util methods.
 """
-from .infill_methods.patchmatch import PatchMatch  # noqa: F401
-from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata  # noqa: F401
-from .util import InitImageResizer, make_grid  # noqa: F401
+from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch  # noqa: F401
+from invokeai.backend.image_util.pngwriter import (  # noqa: F401
+    PngWriter,
+    PromptFormatter,
+    retrieve_metadata,
+    write_metadata,
+)
+from invokeai.backend.image_util.util import InitImageResizer, make_grid  # noqa: F401


@@ -2,7 +2,7 @@ import torch
 from torch import nn as nn
 from torch.nn import functional as F
-from .arch_util import default_init_weights, make_layer, pixel_unshuffle
+from invokeai.backend.image_util.basicsr.arch_util import default_init_weights, make_layer, pixel_unshuffle
 class ResidualDenseBlock(nn.Module):


@@ -4,7 +4,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from .blocks import FeatureFusionBlock, _make_scratch
+from invokeai.backend.image_util.depth_anything.model.blocks import FeatureFusionBlock, _make_scratch
 torchhub_path = Path(__file__).parent.parent / "torchhub"


@@ -8,11 +8,10 @@ import numpy as np
 import onnxruntime as ort
 from invokeai.app.services.config.config_default import get_config
+from invokeai.backend.image_util.dw_openpose.onnxdet import inference_detector
+from invokeai.backend.image_util.dw_openpose.onnxpose import inference_pose
 from invokeai.backend.util.devices import TorchDevice
-from .onnxdet import inference_detector
-from .onnxpose import inference_pose
 config = get_config()


@@ -11,9 +11,8 @@ from PIL import Image
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 from invokeai.backend.ip_adapter.ip_attention_weights import IPAttentionWeights
-from ..raw_model import RawModel
-from .resampler import Resampler
+from invokeai.backend.ip_adapter.resampler import Resampler
+from invokeai.backend.raw_model import RawModel
 class IPAdapterStateDict(TypedDict):


@@ -10,10 +10,9 @@ from safetensors.torch import load_file
 from typing_extensions import Self
 from invokeai.backend.model_manager import BaseModelType
+from invokeai.backend.raw_model import RawModel
 from invokeai.backend.util.devices import TorchDevice
-from .raw_model import RawModel
 class LoRALayerBase:
     # rank: Optional[int]


@@ -1,6 +1,6 @@
 """Re-export frequently-used symbols from the Model Manager backend."""
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModel,
     AnyModelConfig,
     BaseModelType,
@@ -13,9 +13,9 @@ from .config import (
     SchedulerPredictionType,
     SubModelType,
 )
-from .load import LoadedModel
-from .probe import ModelProbe
-from .search import ModelSearch
+from invokeai.backend.model_manager.load import LoadedModel
+from invokeai.backend.model_manager.probe import ModelProbe
+from invokeai.backend.model_manager.search import ModelSearch
 __all__ = [
     "AnyModel",


@@ -30,11 +30,10 @@ from diffusers.models.modeling_utils import ModelMixin
 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter
 from typing_extensions import Annotated, Any, Dict
-from invokeai.app.invocations.constants import SCHEDULER_NAME_VALUES
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
+from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
 # ModelMixin is the base class for all diffusers and transformers models
 # RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime


@@ -1,75 +0,0 @@
-import ctypes
-class Struct_mallinfo2(ctypes.Structure):
-    """A ctypes Structure that matches the libc mallinfo2 struct.
-    Docs:
-    - https://man7.org/linux/man-pages/man3/mallinfo.3.html
-    - https://www.gnu.org/software/libc/manual/html_node/Statistics-of-Malloc.html
-    struct mallinfo2 {
-        size_t arena;     /* Non-mmapped space allocated (bytes) */
-        size_t ordblks;   /* Number of free chunks */
-        size_t smblks;    /* Number of free fastbin blocks */
-        size_t hblks;     /* Number of mmapped regions */
-        size_t hblkhd;    /* Space allocated in mmapped regions (bytes) */
-        size_t usmblks;   /* See below */
-        size_t fsmblks;   /* Space in freed fastbin blocks (bytes) */
-        size_t uordblks;  /* Total allocated space (bytes) */
-        size_t fordblks;  /* Total free space (bytes) */
-        size_t keepcost;  /* Top-most, releasable space (bytes) */
-    };
-    """
-    _fields_ = [
-        ("arena", ctypes.c_size_t),
-        ("ordblks", ctypes.c_size_t),
-        ("smblks", ctypes.c_size_t),
-        ("hblks", ctypes.c_size_t),
-        ("hblkhd", ctypes.c_size_t),
-        ("usmblks", ctypes.c_size_t),
-        ("fsmblks", ctypes.c_size_t),
-        ("uordblks", ctypes.c_size_t),
-        ("fordblks", ctypes.c_size_t),
-        ("keepcost", ctypes.c_size_t),
-    ]
-    def __str__(self):
-        s = ""
-        s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
-        s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
-        s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n"
-        s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n"
-        s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
-        s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
-        s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
-        s += (
-            f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
-            " (GB)\n"
-        )
-        s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
-        s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
-        return s
-class LibcUtil:
-    """A utility class for interacting with the C Standard Library (`libc`) via ctypes.
-    Note that this class will raise on __init__() if 'libc.so.6' can't be found. Take care to handle environments where
-    this shared library is not available.
-    TODO: Improve cross-OS compatibility of this class.
-    """
-    def __init__(self):
-        self._libc = ctypes.cdll.LoadLibrary("libc.so.6")
-    def mallinfo2(self) -> Struct_mallinfo2:
-        """Calls `libc` `mallinfo2`.
-        Docs: https://man7.org/linux/man-pages/man3/mallinfo.3.html
-        """
-        mallinfo2 = self._libc.mallinfo2
-        mallinfo2.restype = Struct_mallinfo2
-        return mallinfo2()
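
LibcUtil's own docstring notes that construction raises if libc.so.6 cannot be found. For reference, a minimal guarded-usage sketch against the copy of this module that survives at invokeai.backend.model_manager.util.libc_util (that import path appears in the memory_snapshot hunk below; the try/except wrapper is an assumption for illustration, not code from this commit):

from invokeai.backend.model_manager.util.libc_util import LibcUtil, Struct_mallinfo2

try:
    info: Struct_mallinfo2 = LibcUtil().mallinfo2()  # LoadLibrary raises OSError off-glibc
    print(info)  # __str__ pretty-prints each field, with byte counts converted to GB
except OSError:
    # libc.so.6 could not be loaded (macOS, Windows, musl-based Linux);
    # malloc statistics are simply unavailable on this platform.
    pass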


@@ -6,10 +6,10 @@ Init file for the model loader.
 from importlib import import_module
 from pathlib import Path
-from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
-from .load_default import ModelLoader
-from .model_cache.model_cache_default import ModelCache
-from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
+from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
 # This registers the subclasses that implement loaders of specific model types
 loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"]
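The hunk is cut off just before the loop that consumes loaders. A sketch of that dynamic-registration step, assuming (as the comment above says) that importing each model_loaders submodule runs its @ModelLoaderRegistry.register decorators; this is a reconstruction, not verbatim lines from this diff:

for module in loaders:
    # Importing invokeai.backend.model_manager.load.model_loaders.<module> executes
    # its ModelLoaderRegistry.register() decorators, which populates the registry.
    import_module(f"{__name__}.model_loaders.{module}")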


@@ -5,7 +5,7 @@ import psutil
 import torch
 from typing_extensions import Self
-from ..util.libc_util import LibcUtil, Struct_mallinfo2
+from invokeai.backend.model_manager.util.libc_util import LibcUtil, Struct_mallinfo2
 GB = 2**30  # 1 GB


@@ -29,13 +29,17 @@ import torch
 from invokeai.backend.model_manager import AnyModel, SubModelType
 from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    CacheStats,
+    ModelCacheBase,
+    ModelLockerBase,
+)
+from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase
-from .model_locker import ModelLocker
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0


@@ -7,8 +7,11 @@ from typing import Dict, Optional
 import torch
 from invokeai.backend.model_manager import AnyModel
-from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    ModelCacheBase,
+    ModelLockerBase,
+)
 class ModelLocker(ModelLockerBase):


@@ -18,7 +18,7 @@ Use like this:
 from abc import ABC, abstractmethod
 from typing import Callable, Dict, Optional, Tuple, Type, TypeVar
-from ..config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ModelConfigBase,
@@ -26,7 +26,7 @@ from ..config import (
     ModelType,
     SubModelType,
 )
-from . import ModelLoaderBase
+from invokeai.backend.model_manager.load import ModelLoaderBase
 class ModelLoaderRegistryBase(ABC):


@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers)


@@ -18,8 +18,8 @@ from invokeai.backend.model_manager import (
     SubModelType,
 )
 from invokeai.backend.model_manager.config import DiffusersConfigBase
-from .. import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)


@@ -15,9 +15,9 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
-from .. import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)


@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX)


@@ -25,11 +25,10 @@ from invokeai.backend.model_manager.config import (
     DiffusersConfigBase,
     MainCheckpointConfig,
 )
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
 VARIANT_TO_IN_CHANNEL_MAP = {
     ModelVariantType.Normal: 4,
     ModelVariantType.Depth: 5,


@@ -12,10 +12,9 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.textual_inversion import TextualInversionModelRaw
-from .. import ModelLoader, ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFile)
 @ModelLoaderRegistry.register(


@@ -12,9 +12,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers)


@@ -39,7 +39,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
     else:
         # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the
         # supported model types.
-        logger.error(
+        logger.warning(
             f"Failed to calculate model size for unexpected model type: {type(model)}. The model will be treated as "
             "having size 0."
         )


@@ -17,16 +17,10 @@ from diffusers.utils import logging as dlogging
 from invokeai.app.services.model_install import ModelInstallServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
+from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType
+from invokeai.backend.model_manager.config import MainDiffusersConfig
 from invokeai.backend.util.devices import TorchDevice
-from . import (
-    AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-)
-from .config import MainDiffusersConfig
 class MergeInterpolationMethod(str, Enum):
     WeightedSum = "weighted_sum"


@@ -16,8 +16,8 @@ data = HuggingFaceMetadataFetch().from_id("<REPO_ID>")
 assert isinstance(data, HuggingFaceMetadata)
 """
-from .fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
-from .metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     AnyModelRepoMetadataValidator,
     BaseMetadata,


@@ -10,7 +10,7 @@ data = HuggingFaceMetadataFetch().from_id("<repo_id>")
 assert isinstance(data, HuggingFaceMetadata)
 """
-from .fetch_base import ModelMetadataFetchBase
-from .huggingface import HuggingFaceMetadataFetch
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 __all__ = ["ModelMetadataFetchBase", "HuggingFaceMetadataFetch"]


@@ -18,8 +18,11 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 from invokeai.backend.model_manager import ModelRepoVariant
-from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata
+from invokeai.backend.model_manager.metadata.metadata_base import (
+    AnyModelRepoMetadata,
+    AnyModelRepoMetadataValidator,
+    BaseMetadata,
+)
 class ModelMetadataFetchBase(ABC):


@@ -25,14 +25,13 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 from invokeai.backend.model_manager.config import ModelRepoVariant
-from ..metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     HuggingFaceMetadata,
     RemoteModelFile,
     UnknownMetadataException,
 )
-from .fetch_base import ModelMetadataFetchBase
 HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"


@@ -24,8 +24,7 @@ from requests.sessions import Session
 from typing_extensions import Annotated
 from invokeai.backend.model_manager import ModelRepoVariant
-from ..util import select_hf_files
+from invokeai.backend.model_manager.util.select_hf_files import filter_files
 class UnknownMetadataException(Exception):
@@ -112,9 +111,7 @@ class HuggingFaceMetadata(ModelMetadataWithFiles):
         session = session or Session()
         configure_http_backend(backend_factory=lambda: session)  # used in testing
-        paths = select_hf_files.filter_files(
-            [x.path for x in self.files], variant, subfolder
-        )  # all files in the model
+        paths = filter_files([x.path for x in self.files], variant, subfolder)  # all files in the model
         prefix = f"{subfolder}/" if subfolder else ""
         # the next step reads model_index.json to determine which subdirectories belong
         # to the model


@@ -11,10 +11,7 @@ from picklescan.scanner import scan_file_path
 import invokeai.backend.util.logging as logger
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
-from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
-from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ControlAdapterDefaultSettings,
@@ -28,7 +25,9 @@ from .config import (
     ModelVariantType,
     SchedulerPredictionType,
 )
-from .util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
+from invokeai.backend.util.silence_warnings import SilenceWarnings
 CkptType = Dict[str | int, Any]


@@ -17,7 +17,7 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Dict, List, Optional, Set
-from ..config import ModelRepoVariant
+from invokeai.backend.model_manager.config import ModelRepoVariant
 def filter_files(


@@ -13,14 +13,13 @@ from diffusers import OnnxRuntimeModel, UNet2DConditionModel
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
 from invokeai.app.shared.models import FreeUConfig
+from invokeai.backend.lora import LoRAModelRaw
 from invokeai.backend.model_manager import AnyModel
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
+from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
 from invokeai.backend.util.devices import TorchDevice
-from .lora import LoRAModelRaw
-from .textual_inversion import TextualInversionManager, TextualInversionModelRaw
 """
 loras = [
     (lora_model1, 0.7),
@@ -338,7 +337,7 @@ class ONNXModelPatcher:
         loras: List[Tuple[LoRAModelRaw, float]],
         prefix: str,
     ) -> None:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
         if not isinstance(model, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")
@@ -425,7 +424,7 @@ class ONNXModelPatcher:
         text_encoder: IAIOnnxRuntimeModel,
         ti_list: List[Tuple[str, Any]],
     ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
         if not isinstance(text_encoder, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")


@@ -10,7 +10,7 @@ import torch
 from onnx import numpy_helper
 from onnxruntime import InferenceSession, SessionOptions, get_available_providers
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 ONNX_WEIGHTS_NAME = "model.onnx"


@@ -2,9 +2,12 @@
 Initialization file for the invokeai.backend.stable_diffusion package
 """
-from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline  # noqa: F401
-from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
-from .seamless import set_seamless  # noqa: F401
+from invokeai.backend.stable_diffusion.diffusers_pipeline import (  # noqa: F401
+    PipelineIntermediateState,
+    StableDiffusionGeneratorPipeline,
+)
+from invokeai.backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent  # noqa: F401
+from invokeai.backend.stable_diffusion.seamless import set_seamless  # noqa: F401
 __all__ = [
     "PipelineIntermediateState",


@@ -2,4 +2,6 @@
 Initialization file for invokeai.models.diffusion
 """
-from .shared_invokeai_diffusion import InvokeAIDiffuserComponent  # noqa: F401
+from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import (
+    InvokeAIDiffuserComponent,  # noqa: F401
+)


@@ -1,3 +1,3 @@
-from .schedulers import SCHEDULER_MAP  # noqa: F401
+from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_MAP  # noqa: F401
 __all__ = ["SCHEDULER_MAP"]


@@ -1,3 +1,5 @@
+from typing import Any, Literal, Type
+
 from diffusers import (
     DDIMScheduler,
     DDPMScheduler,
@@ -16,8 +18,36 @@ from diffusers import (
     TCDScheduler,
     UniPCMultistepScheduler,
 )
+from diffusers.schedulers.scheduling_utils import SchedulerMixin
-SCHEDULER_MAP = {
+SCHEDULER_NAME_VALUES = Literal[
+    "ddim",
+    "ddpm",
+    "deis",
+    "lms",
+    "lms_k",
+    "pndm",
+    "heun",
+    "heun_k",
+    "euler",
+    "euler_k",
+    "euler_a",
+    "kdpm_2",
+    "kdpm_2_a",
+    "dpmpp_2s",
+    "dpmpp_2s_k",
+    "dpmpp_2m",
+    "dpmpp_2m_k",
+    "dpmpp_2m_sde",
+    "dpmpp_2m_sde_k",
+    "dpmpp_sde",
+    "dpmpp_sde_k",
+    "unipc",
+    "lcm",
+    "tcd",
+]
+SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, Any]]] = {
     "ddim": (DDIMScheduler, {}),
     "ddpm": (DDPMScheduler, {}),
     "deis": (DEISMultistepScheduler, {}),


@@ -9,7 +9,7 @@ from safetensors.torch import load_file
 from transformers import CLIPTokenizer
 from typing_extensions import Self
-from .raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 class TextualInversionModelRaw(RawModel):


@@ -2,8 +2,8 @@
 Initialization file for invokeai.backend.util
 """
-from .logging import InvokeAILogger
-from .util import GIG, Chdir, directory_size
+from invokeai.backend.util.logging import InvokeAILogger
+from invokeai.backend.util.util import GIG, Chdir, directory_size
 __all__ = [
     "GIG",