Enforce absolute imports with ruff (#6576)

## Summary

This PR migrates all relative imports to absolute imports and adds a
ruff check to enforce absolute imports going forward.

The justification for this change is here:
https://github.com/invoke-ai/InvokeAI/issues/6575
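
For reference, the ruff check is most naturally expressed through the flake8-tidy-imports rule set. Below is a minimal sketch of the kind of `pyproject.toml` configuration involved; the exact rule selection in this PR may differ, and the TID252 rule code and `ban-relative-imports` setting are assumptions rather than quotes from the diff:

```toml
# Sketch: ban relative imports via ruff's flake8-tidy-imports plugin.
# The actual configuration adopted by this PR may differ.
[tool.ruff.lint]
extend-select = ["TID252"]  # TID252: relative imports are banned

[tool.ruff.lint.flake8-tidy-imports]
ban-relative-imports = "all"  # flag every relative import, not only parent-relative ones
```

With a rule like this selected, `ruff check --fix` can rewrite most offending imports into their absolute form, which lines up with the QA note below that most of the conversions could be completed automatically.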

## QA Instructions

Smoke test all common workflows. Most of the relative -> absolute
conversions could be completed automatically, so the risk is relatively
low.

## Merge Plan

As with any far-reaching change like this, it is likely to cause merge
conflicts with in-flight branches. Unfortunately, there's no way around
that, but let me know if you can think of in-flight work that would be
significantly disrupted by this.

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [x] _Tests added / updated (if applicable)_ N/A
- [x] _Documentation added / updated (if applicable)_ N/A
Commit b35f5b3877 by Ryan Dick, 2024-07-04 10:29:01 -04:00 (committed by GitHub)
98 changed files with 311 additions and 364 deletions

View File

@@ -4,37 +4,39 @@ from logging import Logger
 import torch
+from invokeai.app.services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage
+from invokeai.app.services.board_images.board_images_default import BoardImagesService
+from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
+from invokeai.app.services.boards.boards_default import BoardService
+from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.download.download_default import DownloadQueueService
+from invokeai.app.services.events.events_fastapievents import FastAPIEventService
+from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
+from invokeai.app.services.image_records.image_records_sqlite import SqliteImageRecordStorage
+from invokeai.app.services.images.images_default import ImageService
+from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.model_images.model_images_default import ModelImageFileStorageDisk
+from invokeai.app.services.model_manager.model_manager_default import ModelManagerService
+from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
+from invokeai.app.services.names.names_default import SimpleNameService
 from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
 from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache
+from invokeai.app.services.session_processor.session_processor_default import (
+    DefaultSessionProcessor,
+    DefaultSessionRunner,
+)
+from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue
 from invokeai.app.services.shared.sqlite.sqlite_util import init_db
+from invokeai.app.services.urls.urls_default import LocalUrlService
+from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.version.invokeai_version import __version__
-from ..services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage
-from ..services.board_images.board_images_default import BoardImagesService
-from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage
-from ..services.boards.boards_default import BoardService
-from ..services.bulk_download.bulk_download_default import BulkDownloadService
-from ..services.config import InvokeAIAppConfig
-from ..services.download import DownloadQueueService
-from ..services.events.events_fastapievents import FastAPIEventService
-from ..services.image_files.image_files_disk import DiskImageFileStorage
-from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage
-from ..services.images.images_default import ImageService
-from ..services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
-from ..services.invocation_services import InvocationServices
-from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
-from ..services.invoker import Invoker
-from ..services.model_images.model_images_default import ModelImageFileStorageDisk
-from ..services.model_manager.model_manager_default import ModelManagerService
-from ..services.model_records import ModelRecordServiceSQL
-from ..services.names.names_default import SimpleNameService
-from ..services.session_processor.session_processor_default import DefaultSessionProcessor, DefaultSessionRunner
-from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
-from ..services.urls.urls_default import LocalUrlService
-from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
 # TODO: is there a better way to achieve this?
 def check_internet() -> bool:

View File

@@ -10,14 +10,13 @@ from fastapi import Body
 from fastapi.routing import APIRouter
 from pydantic import BaseModel, Field
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.invocations.upscale import ESRGAN_MODELS
 from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
 from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
 from invokeai.backend.util.logging import logging
 from invokeai.version import __version__
-from ..dependencies import ApiDependencies
 class LogLevel(int, Enum):
     NotSet = logging.NOTSET

View File

@@ -2,7 +2,7 @@ from fastapi import Body, HTTPException
 from fastapi.routing import APIRouter
 from pydantic import BaseModel, Field
-from ..dependencies import ApiDependencies
+from invokeai.app.api.dependencies import ApiDependencies
 board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])

View File

@@ -4,12 +4,11 @@ from fastapi import Body, HTTPException, Path, Query
 from fastapi.routing import APIRouter
 from pydantic import BaseModel, Field
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.board_records.board_records_common import BoardChanges
 from invokeai.app.services.boards.boards_common import BoardDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from ..dependencies import ApiDependencies
 boards_router = APIRouter(prefix="/v1/boards", tags=["boards"])

View File

@@ -8,13 +8,12 @@ from fastapi.routing import APIRouter
 from pydantic.networks import AnyHttpUrl
 from starlette.exceptions import HTTPException
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.download import (
     DownloadJob,
     UnknownJobIDException,
 )
-from ..dependencies import ApiDependencies
 download_queue_router = APIRouter(prefix="/v1/download_queue", tags=["download_queue"])

View File

@@ -8,6 +8,7 @@ from fastapi.routing import APIRouter
 from PIL import Image
 from pydantic import BaseModel, Field, JsonValue
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.invocations.fields import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
@@ -18,8 +19,6 @@ from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from ..dependencies import ApiDependencies
 images_router = APIRouter(prefix="/v1/images", tags=["images"])

View File

@@ -16,6 +16,7 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
 from starlette.exceptions import HTTPException
 from typing_extensions import Annotated
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
 from invokeai.app.services.model_install.model_install_common import ModelInstallJob
 from invokeai.app.services.model_records import (
@@ -35,8 +36,6 @@ from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataW
 from invokeai.backend.model_manager.search import ModelSearch
 from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel, StarterModelWithoutDependencies
-from ..dependencies import ApiDependencies
 model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])
 # images are immutable; set a high max-age

View File

@@ -4,6 +4,7 @@ from fastapi import Body, Path, Query
 from fastapi.routing import APIRouter
 from pydantic import BaseModel
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
 from invokeai.app.services.session_queue.session_queue_common import (
     QUEUE_ITEM_STATUS,
@@ -19,8 +20,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
 )
 from invokeai.app.services.shared.pagination import CursorPaginatedResults
-from ..dependencies import ApiDependencies
 session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"])

View File

@@ -20,14 +20,9 @@ from torch.backends.mps import is_available as is_mps_available
 # noinspection PyUnresolvedReferences
 import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)
 import invokeai.frontend.web as web_dir
+from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
-from invokeai.app.services.config.config_default import get_config
-from invokeai.app.util.custom_openapi import get_openapi_func
-from invokeai.backend.util.devices import TorchDevice
-from ..backend.util.logging import InvokeAILogger
-from .api.dependencies import ApiDependencies
-from .api.routers import (
+from invokeai.app.api.routers import (
     app_info,
     board_images,
     boards,
@@ -38,7 +33,11 @@ from .api.routers import (
     utilities,
     workflows,
 )
-from .api.sockets import SocketIO
+from invokeai.app.api.sockets import SocketIO
+from invokeai.app.services.config.config_default import get_config
+from invokeai.app.util.custom_openapi import get_openapi_func
+from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.logging import InvokeAILogger
 app_config = get_config()

View File

@@ -40,7 +40,7 @@ from invokeai.app.util.misc import uuid_string
 from invokeai.backend.util.logging import InvokeAILogger
 if TYPE_CHECKING:
-    from ..services.invocation_services import InvocationServices
+    from invokeai.app.services.invocation_services import InvocationServices
 logger = InvokeAILogger.get_logger()

View File

@@ -4,13 +4,12 @@
 import numpy as np
 from pydantic import ValidationInfo, field_validator
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import InputField
 from invokeai.app.invocations.primitives import IntegerCollectionOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.misc import SEED_MAX
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField
 @invocation(
     "range", title="Integer Range", tags=["collection", "integer", "range"], category="collections", version="1.0.0"

View File

@@ -5,6 +5,7 @@ from compel import Compel, ReturnedEmbeddingsType
 from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 from invokeai.app.invocations.fields import (
     ConditioningField,
     FieldDescriptions,
@@ -14,6 +15,7 @@ from invokeai.app.invocations.fields import (
     TensorField,
     UIComponent,
 )
+from invokeai.app.invocations.model import CLIPField
 from invokeai.app.invocations.primitives import ConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.ti_utils import generate_ti_list
@@ -26,9 +28,6 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
 )
 from invokeai.backend.util.devices import TorchDevice
-from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from .model import CLIPField
 # unconditioned: Optional[torch.Tensor]

View File

@@ -22,6 +22,13 @@ from controlnet_aux.util import HWC3, ade_palette
 from PIL import Image
 from pydantic import BaseModel, Field, field_validator, model_validator
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -45,8 +52,6 @@ from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np
 from invokeai.backend.util.devices import TorchDevice
-from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
 class ControlField(BaseModel):
     image: ImageField = Field(description="The control image")

View File

@@ -5,13 +5,11 @@ import cv2 as cv
 import numpy
 from PIL import Image, ImageOps
-from invokeai.app.invocations.fields import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField, WithBoard, WithMetadata
 @invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.3.1")
 class CvInpaintInvocation(BaseInvocation, WithMetadata, WithBoard):

View File

@@ -6,6 +6,7 @@ import cv2
 import numpy
 from PIL import Image, ImageChops, ImageFilter, ImageOps
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
 from invokeai.app.invocations.constants import IMAGE_MODES
 from invokeai.app.invocations.fields import (
     ColorField,
@@ -21,8 +22,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
 from invokeai.backend.image_util.safety_checker import SafetyChecker
-from .baseinvocation import BaseInvocation, Classification, invocation
 @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.1")
 class ShowImageInvocation(BaseInvocation):

View File

@@ -3,7 +3,9 @@ from typing import Literal, get_args
 from PIL import Image
-from invokeai.app.invocations.fields import ColorField, ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import ColorField, ImageField, InputField, WithBoard, WithMetadata
+from invokeai.app.invocations.image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.misc import SEED_MAX
@@ -14,10 +16,6 @@ from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch, in
 from invokeai.backend.image_util.infill_methods.tile import infill_tile
 from invokeai.backend.util.logging import InvokeAILogger
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField, WithBoard, WithMetadata
-from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES
 logger = InvokeAILogger.get_logger()

View File

@@ -5,12 +5,11 @@ from typing import Literal
 import numpy as np
 from pydantic import ValidationInfo, field_validator
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, InputField
 from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import BaseInvocation, invocation
 @invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.1")
 class AddInvocation(BaseInvocation):

View File

@@ -14,8 +14,7 @@ from invokeai.app.invocations.fields import (
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
-from ...version import __version__
+from invokeai.version.invokeai_version import __version__
 class MetadataItemField(BaseModel):

View File

@@ -3,18 +3,17 @@ from typing import List, Optional
 from pydantic import BaseModel, Field
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
-from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.app.shared.models import FreeUConfig
-from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType
-from .baseinvocation import (
+from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
     Classification,
     invocation,
     invocation_output,
 )
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.app.shared.models import FreeUConfig
+from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType
 class ModelIdentifierField(BaseModel):

View File

@@ -4,18 +4,12 @@
 import torch
 from pydantic import field_validator
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, LatentsField, OutputField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.misc import SEED_MAX
+from invokeai.backend.util.devices import TorchDevice
-from ...backend.util.devices import TorchDevice
-from .baseinvocation import (
-    BaseInvocation,
-    BaseInvocationOutput,
-    invocation,
-    invocation_output,
-)
 """
 Utilities

View File

@@ -39,12 +39,11 @@ from easing_functions import (
 )
 from matplotlib.ticker import MaxNLocator
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import InputField
 from invokeai.app.invocations.primitives import FloatCollectionOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField
 @invocation(
     "float_range",

View File

@@ -4,6 +4,7 @@ from typing import Optional
 import torch
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     ColorField,
@@ -21,13 +22,6 @@ from invokeai.app.invocations.fields import (
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import (
-    BaseInvocation,
-    BaseInvocationOutput,
-    invocation,
-    invocation_output,
-)
 """
 Primitives: Boolean, Integer, Float, String, Image, Latents, Conditioning, Color
 - primitive nodes

View File

@@ -5,12 +5,11 @@ import numpy as np
 from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator
 from pydantic import field_validator
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import InputField, UIComponent
 from invokeai.app.invocations.primitives import StringCollectionOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField, UIComponent
 @invocation(
     "dynamic_prompt",

View File

@@ -1,15 +1,9 @@
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
+from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager import SubModelType
-from .baseinvocation import (
-    BaseInvocation,
-    BaseInvocationOutput,
-    invocation,
-    invocation_output,
-)
-from .model import CLIPField, ModelIdentifierField, UNetField, VAEField
 @invocation_output("sdxl_model_loader_output")
 class SDXLModelLoaderOutput(BaseInvocationOutput):

View File

@@ -2,17 +2,11 @@
 import re
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
+from invokeai.app.invocations.fields import InputField, OutputField, UIComponent
+from invokeai.app.invocations.primitives import StringOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from .baseinvocation import (
-    BaseInvocation,
-    BaseInvocationOutput,
-    invocation,
-    invocation_output,
-)
-from .fields import InputField, OutputField, UIComponent
-from .primitives import StringOutput
 @invocation_output("string_pos_neg_output")
 class StringPosNegOutput(BaseInvocationOutput):

View File

@@ -6,15 +6,13 @@ import numpy as np
 from PIL import Image
 from pydantic import ConfigDict
-from invokeai.app.invocations.fields import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
 from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
-from .baseinvocation import BaseInvocation, invocation
-from .fields import InputField, WithBoard, WithMetadata
 # TODO: Populate this from disk?
 # TODO: Use model manager to load?
 ESRGAN_MODELS = Literal[

View File

@@ -2,12 +2,11 @@ import sqlite3
 import threading
 from typing import Optional, cast
+from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
 from invokeai.app.services.image_records.image_records_common import ImageRecord, deserialize_image_record
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
-from .board_image_records_base import BoardImageRecordStorageBase
 class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
     _conn: sqlite3.Connection

View File

@@ -1,9 +1,8 @@
 from typing import Optional
+from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC
 from invokeai.app.services.invoker import Invoker
-from .board_images_base import BoardImagesServiceABC
 class BoardImagesService(BoardImagesServiceABC):
     __invoker: Invoker

View File

@@ -1,9 +1,8 @@
 from abc import ABC, abstractmethod
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecord
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from .board_records_common import BoardChanges, BoardRecord
 class BoardRecordStorageBase(ABC):
     """Low-level service responsible for interfacing with the board record store."""

View File

@@ -2,12 +2,8 @@ import sqlite3
 import threading
 from typing import Union, cast
-from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
-from invokeai.app.util.misc import uuid_string
-from .board_records_base import BoardRecordStorageBase
-from .board_records_common import (
+from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
+from invokeai.app.services.board_records.board_records_common import (
     BoardChanges,
     BoardRecord,
     BoardRecordDeleteException,
@@ -15,6 +11,9 @@ from .board_records_common import (
     BoardRecordSaveException,
     deserialize_board_record,
 )
+from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
+from invokeai.app.util.misc import uuid_string
 class SqliteBoardRecordStorage(BoardRecordStorageBase):

View File

@@ -1,10 +1,9 @@
 from abc import ABC, abstractmethod
 from invokeai.app.services.board_records.board_records_common import BoardChanges
+from invokeai.app.services.boards.boards_common import BoardDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from .boards_common import BoardDTO
 class BoardServiceABC(ABC):
     """High-level service for board management."""

View File

@@ -2,7 +2,7 @@ from typing import Optional
 from pydantic import Field
-from ..board_records.board_records_common import BoardRecord
+from invokeai.app.services.board_records.board_records_common import BoardRecord
 class BoardDTO(BoardRecord):

View File

@@ -1,11 +1,9 @@
 from invokeai.app.services.board_records.board_records_common import BoardChanges
-from invokeai.app.services.boards.boards_common import BoardDTO
+from invokeai.app.services.boards.boards_base import BoardServiceABC
+from invokeai.app.services.boards.boards_common import BoardDTO, board_record_to_dto
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from .boards_base import BoardServiceABC
-from .boards_common import board_record_to_dto
 class BoardService(BoardServiceABC):
     __invoker: Invoker

View File

@@ -4,6 +4,7 @@ from typing import Optional, Union
 from zipfile import ZipFile
 from invokeai.app.services.board_records.board_records_common import BoardRecordNotFoundException
+from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
 from invokeai.app.services.bulk_download.bulk_download_common import (
     DEFAULT_BULK_DOWNLOAD_ID,
     BulkDownloadException,
@@ -15,8 +16,6 @@ from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.util.misc import uuid_string
-from .bulk_download_base import BulkDownloadBase
 class BulkDownloadService(BulkDownloadBase):
     def start(self, invoker: Invoker) -> None:

View File

@@ -1,7 +1,6 @@
 """Init file for InvokeAI configure package."""
 from invokeai.app.services.config.config_common import PagingArgumentParser
-from .config_default import InvokeAIAppConfig, get_config
+from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
 __all__ = ["InvokeAIAppConfig", "get_config", "PagingArgumentParser"]

View File

@@ -1,13 +1,13 @@
 """Init file for download queue."""
-from .download_base import (
+from invokeai.app.services.download.download_base import (
     DownloadJob,
     DownloadJobStatus,
     DownloadQueueServiceBase,
     MultiFileDownloadJob,
     UnknownJobIDException,
 )
-from .download_default import DownloadQueueService, TqdmProgress
+from invokeai.app.services.download.download_default import DownloadQueueService, TqdmProgress
 __all__ = [
     "DownloadJob",

View File

@@ -16,12 +16,7 @@ from requests import HTTPError
 from tqdm import tqdm
 from invokeai.app.services.config import InvokeAIAppConfig, get_config
-from invokeai.app.services.events.events_base import EventServiceBase
-from invokeai.app.util.misc import get_iso_timestamp
-from invokeai.backend.model_manager.metadata import RemoteModelFile
-from invokeai.backend.util.logging import InvokeAILogger
-from .download_base import (
+from invokeai.app.services.download.download_base import (
     DownloadEventHandler,
     DownloadExceptionHandler,
     DownloadJob,
@@ -33,6 +28,10 @@ from .download_base import (
     ServiceInactiveException,
     UnknownJobIDException,
 )
+from invokeai.app.services.events.events_base import EventServiceBase
+from invokeai.app.util.misc import get_iso_timestamp
+from invokeai.backend.model_manager.metadata import RemoteModelFile
+from invokeai.backend.util.logging import InvokeAILogger
 # Maximum number of bytes to download during each call to requests.iter_content()
 DOWNLOAD_CHUNK_SIZE = 100000

View File

@@ -6,12 +6,11 @@ from queue import Empty, Queue
 from fastapi_events.dispatcher import dispatch
+from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.events.events_common import (
     EventBase,
 )
-from .events_base import EventServiceBase
 class FastAPIEventService(EventServiceBase):
     def __init__(self, event_handler_id: int) -> None:

View File

@@ -7,12 +7,15 @@ from PIL import Image, PngImagePlugin
 from PIL.Image import Image as PILImageType
 from send2trash import send2trash
+from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
+from invokeai.app.services.image_files.image_files_common import (
+    ImageFileDeleteException,
+    ImageFileNotFoundException,
+    ImageFileSaveException,
+)
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
-from .image_files_base import ImageFileStorageBase
-from .image_files_common import ImageFileDeleteException, ImageFileNotFoundException, ImageFileSaveException
 class DiskImageFileStorage(ImageFileStorageBase):
     """Stores images on disk"""

View File

@@ -3,11 +3,15 @@ from datetime import datetime
 from typing import Optional
 from invokeai.app.invocations.fields import MetadataField
+from invokeai.app.services.image_records.image_records_common import (
+    ImageCategory,
+    ImageRecord,
+    ImageRecordChanges,
+    ResourceOrigin,
+)
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from .image_records_common import ImageCategory, ImageRecord, ImageRecordChanges, ResourceOrigin
 class ImageRecordStorageBase(ABC):
     """Low-level service responsible for interfacing with the image record store."""

View File

@@ -4,12 +4,8 @@ from datetime import datetime
 from typing import Optional, Union, cast
 from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator
-from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
-from .image_records_base import ImageRecordStorageBase
-from .image_records_common import (
+from invokeai.app.services.image_records.image_records_base import ImageRecordStorageBase
+from invokeai.app.services.image_records.image_records_common import (
     IMAGE_DTO_COLS,
     ImageCategory,
     ImageRecord,
@@ -20,6 +16,9 @@ from .image_records_common import (
     ResourceOrigin,
     deserialize_image_record,
 )
+from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 class SqliteImageRecordStorage(ImageRecordStorageBase):

View File

@@ -3,16 +3,12 @@ from typing import Optional
 from PIL.Image import Image as PILImageType
 from invokeai.app.invocations.fields import MetadataField
-from invokeai.app.services.invoker import Invoker
-from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from ..image_files.image_files_common import (
+from invokeai.app.services.image_files.image_files_common import (
     ImageFileDeleteException,
     ImageFileNotFoundException,
     ImageFileSaveException,
 )
-from ..image_records.image_records_common import (
+from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
     ImageRecord,
     ImageRecordChanges,
@@ -23,8 +19,11 @@ from ..image_records.image_records_common import (
     InvalidOriginException,
     ResourceOrigin,
 )
-from .images_base import ImageServiceABC
-from .images_common import ImageDTO, image_record_to_dto
+from invokeai.app.services.images.images_base import ImageServiceABC
+from invokeai.app.services.images.images_common import ImageDTO, image_record_to_dto
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 class ImageService(ImageServiceABC):

View File

@@ -10,29 +10,28 @@ if TYPE_CHECKING:
     import torch
+    from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
+    from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC
+    from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
+    from invokeai.app.services.boards.boards_base import BoardServiceABC
+    from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
+    from invokeai.app.services.config import InvokeAIAppConfig
+    from invokeai.app.services.download import DownloadQueueServiceBase
+    from invokeai.app.services.events.events_base import EventServiceBase
+    from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
+    from invokeai.app.services.image_records.image_records_base import ImageRecordStorageBase
+    from invokeai.app.services.images.images_base import ImageServiceABC
+    from invokeai.app.services.invocation_cache.invocation_cache_base import InvocationCacheBase
+    from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase
+    from invokeai.app.services.model_images.model_images_base import ModelImageFileStorageBase
+    from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
+    from invokeai.app.services.names.names_base import NameServiceBase
+    from invokeai.app.services.session_processor.session_processor_base import SessionProcessorBase
+    from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
+    from invokeai.app.services.urls.urls_base import UrlServiceBase
+    from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
     from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
-    from .board_image_records.board_image_records_base import BoardImageRecordStorageBase
-    from .board_images.board_images_base import BoardImagesServiceABC
-    from .board_records.board_records_base import BoardRecordStorageBase
-    from .boards.boards_base import BoardServiceABC
-    from .bulk_download.bulk_download_base import BulkDownloadBase
-    from .config import InvokeAIAppConfig
-    from .download import DownloadQueueServiceBase
-    from .events.events_base import EventServiceBase
-    from .image_files.image_files_base import ImageFileStorageBase
-    from .image_records.image_records_base import ImageRecordStorageBase
-    from .images.images_base import ImageServiceABC
-    from .invocation_cache.invocation_cache_base import InvocationCacheBase
-    from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase
-    from .model_images.model_images_base import ModelImageFileStorageBase
-    from .model_manager.model_manager_base import ModelManagerServiceBase
-    from .names.names_base import NameServiceBase
-    from .session_processor.session_processor_base import SessionProcessorBase
-    from .session_queue.session_queue_base import SessionQueueBase
-    from .urls.urls_base import UrlServiceBase
-    from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase
 class InvocationServices:
     """Services that can be used by invocations"""

View File

@@ -9,11 +9,8 @@ import torch
 import invokeai.backend.util.logging as logger
 from invokeai.app.invocations.baseinvocation import BaseInvocation
-from invokeai.app.services.invoker import Invoker
-from invokeai.backend.model_manager.load.model_cache import CacheStats
-from .invocation_stats_base import InvocationStatsServiceBase
-from .invocation_stats_common import (
+from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase
+from invokeai.app.services.invocation_stats.invocation_stats_common import (
     GESStatsNotFoundError,
     GraphExecutionStats,
     GraphExecutionStatsSummary,
@@ -22,6 +19,8 @@ from .invocation_stats_common import (
     NodeExecutionStats,
     NodeExecutionStatsSummary,
 )
+from invokeai.app.services.invoker import Invoker
+from invokeai.backend.model_manager.load.model_cache import CacheStats
 # Size of 1GB in bytes.
 GB = 2**30

View File

@@ -1,7 +1,7 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from .invocation_services import InvocationServices
+from invokeai.app.services.invocation_services import InvocationServices
 class Invoker:

View File

@@ -5,15 +5,14 @@ from PIL.Image import Image as PILImageType
 from send2trash import send2trash
 from invokeai.app.services.invoker import Invoker
-from invokeai.app.util.misc import uuid_string
-from invokeai.app.util.thumbnails import make_thumbnail
-from .model_images_base import ModelImageFileStorageBase
-from .model_images_common import (
+from invokeai.app.services.model_images.model_images_base import ModelImageFileStorageBase
+from invokeai.app.services.model_images.model_images_common import (
     ModelImageFileDeleteException,
     ModelImageFileNotFoundException,
     ModelImageFileSaveException,
 )
+from invokeai.app.util.misc import uuid_string
+from invokeai.app.util.thumbnails import make_thumbnail
 class ModelImageFileStorageDisk(ModelImageFileStorageBase):

View File

@@ -1,9 +1,7 @@
 """Initialization file for model install service package."""
-from .model_install_base import (
-    ModelInstallServiceBase,
-)
-from .model_install_common import (
+from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_install.model_install_common import (
     HFModelSource,
     InstallStatus,
     LocalModelSource,
@@ -12,7 +10,7 @@ from .model_install_common import (
     UnknownInstallJobException,
     URLModelSource,
 )
-from .model_install_default import ModelInstallService
+from invokeai.app.services.model_install.model_install_default import ModelInstallService
 __all__ = [
     "ModelInstallServiceBase",

View File

@@ -23,6 +23,16 @@ from invokeai.app.services.download import DownloadQueueServiceBase, MultiFileDo
 from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_install.model_install_common import (
+    MODEL_SOURCE_TO_TYPE_MAP,
+    HFModelSource,
+    InstallStatus,
+    LocalModelSource,
+    ModelInstallJob,
+    ModelSource,
+    StringLikeSource,
+    URLModelSource,
+)
 from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
 from invokeai.backend.model_manager.config import (
@@ -47,17 +57,6 @@ from invokeai.backend.util.catch_sigint import catch_sigint
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.util import slugify
-from .model_install_common import (
-    MODEL_SOURCE_TO_TYPE_MAP,
-    HFModelSource,
-    InstallStatus,
-    LocalModelSource,
-    ModelInstallJob,
-    ModelSource,
-    StringLikeSource,
-    URLModelSource,
-)
 TMPDIR_PREFIX = "tmpinstall_"

View File

@@ -1,6 +1,6 @@
 """Initialization file for model load service module."""
-from .model_load_base import ModelLoadServiceBase
-from .model_load_default import ModelLoadService
+from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
+from invokeai.app.services.model_load.model_load_default import ModelLoadService
 __all__ = ["ModelLoadServiceBase", "ModelLoadService"]

View File

@@ -10,6 +10,7 @@ from torch import load as torch_load
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
 from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
 from invokeai.backend.model_manager.load import (
     LoadedModel,
@@ -22,8 +23,6 @@ from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-from .model_load_base import ModelLoadServiceBase
 class ModelLoadService(ModelLoadServiceBase):
     """Wrapper around ModelLoaderRegistry."""

View File

@@ -1,10 +1,9 @@
 """Initialization file for model manager service."""
+from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase
 from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelType, SubModelType
 from invokeai.backend.model_manager.load import LoadedModel
-from .model_manager_default import ModelManagerService, ModelManagerServiceBase
 __all__ = [
     "ModelManagerServiceBase",
     "ModelManagerService",


@@ -5,14 +5,13 @@ from abc import ABC, abstractmethod
 import torch
 from typing_extensions import Self
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.download.download_base import DownloadQueueServiceBase
+from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
+from invokeai.app.services.model_records.model_records_base import ModelRecordServiceBase
-from ..config import InvokeAIAppConfig
-from ..download import DownloadQueueServiceBase
-from ..events.events_base import EventServiceBase
-from ..model_install import ModelInstallServiceBase
-from ..model_load import ModelLoadServiceBase
-from ..model_records import ModelRecordServiceBase
 class ModelManagerServiceBase(ABC):


@@ -6,19 +6,20 @@ from typing import Optional
 import torch
 from typing_extensions import Self
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.download.download_base import DownloadQueueServiceBase
+from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_install.model_install_default import ModelInstallService
+from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
+from invokeai.app.services.model_load.model_load_default import ModelLoadService
+from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
+from invokeai.app.services.model_records.model_records_base import ModelRecordServiceBase
 from invokeai.backend.model_manager.load import ModelCache, ModelLoaderRegistry
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-from ..config import InvokeAIAppConfig
-from ..download import DownloadQueueServiceBase
-from ..events.events_base import EventServiceBase
-from ..model_install import ModelInstallService, ModelInstallServiceBase
-from ..model_load import ModelLoadService, ModelLoadServiceBase
-from ..model_records import ModelRecordServiceBase
-from .model_manager_base import ModelManagerServiceBase
 class ModelManagerService(ModelManagerServiceBase):
     """


@@ -45,17 +45,7 @@ from math import ceil
 from pathlib import Path
 from typing import List, Optional, Union
-from invokeai.app.services.shared.pagination import PaginatedResults
-from invokeai.backend.model_manager.config import (
-    AnyModelConfig,
-    BaseModelType,
-    ModelConfigFactory,
-    ModelFormat,
-    ModelType,
-)
-from ..shared.sqlite.sqlite_database import SqliteDatabase
-from .model_records_base import (
+from invokeai.app.services.model_records.model_records_base import (
     DuplicateModelException,
     ModelRecordChanges,
     ModelRecordOrderBy,
@@ -63,6 +53,15 @@ from .model_records_base import (
     ModelSummary,
     UnknownModelException,
 )
+from invokeai.app.services.shared.pagination import PaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
+from invokeai.backend.model_manager.config import (
+    AnyModelConfig,
+    BaseModelType,
+    ModelConfigFactory,
+    ModelFormat,
+    ModelType,
+)
 class ModelRecordServiceSQL(ModelRecordServiceBase):


@@ -1,7 +1,6 @@
+from invokeai.app.services.names.names_base import NameServiceBase
 from invokeai.app.util.misc import uuid_string
-from .names_base import NameServiceBase
 class SimpleNameService(NameServiceBase):
     """Creates image names from UUIDs."""


@@ -13,24 +13,24 @@ from invokeai.app.services.events.events_common import (
     register_events,
 )
 from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError
+from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.session_processor.session_processor_base import (
+    InvocationServices,
     OnAfterRunNode,
     OnAfterRunSession,
     OnBeforeRunNode,
     OnBeforeRunSession,
     OnNodeError,
     OnNonFatalProcessorError,
+    SessionProcessorBase,
+    SessionRunnerBase,
 )
-from invokeai.app.services.session_processor.session_processor_common import CanceledException
+from invokeai.app.services.session_processor.session_processor_common import CanceledException, SessionProcessorStatus
 from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem, SessionQueueItemNotFoundError
 from invokeai.app.services.shared.graph import NodeInputError
 from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context
 from invokeai.app.util.profiler import Profiler
-from ..invoker import Invoker
-from .session_processor_base import InvocationServices, SessionProcessorBase, SessionRunnerBase
-from .session_processor_common import SessionProcessorStatus
 class DefaultSessionRunner(SessionRunnerBase):
     """Processes a single session's invocations."""


@@ -1,6 +1,6 @@
 import os
-from .urls_base import UrlServiceBase
+from invokeai.app.services.urls.urls_base import UrlServiceBase
 class LocalUrlService(UrlServiceBase):


@@ -5,9 +5,8 @@ from PIL import Image
 from invokeai.app.services.session_processor.session_processor_common import CanceledException, ProgressImage
 from invokeai.backend.model_manager.config import BaseModelType
-from ...backend.stable_diffusion import PipelineIntermediateState
-from ...backend.util.util import image_to_dataURL
+from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
+from invokeai.backend.util.util import image_to_dataURL
 if TYPE_CHECKING:
     from invokeai.app.services.events.events_base import EventServiceBase


@@ -2,6 +2,11 @@
 Initialization file for invokeai.backend.image_util methods.
 """
-from .infill_methods.patchmatch import PatchMatch  # noqa: F401
-from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata  # noqa: F401
-from .util import InitImageResizer, make_grid  # noqa: F401
+from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch  # noqa: F401
+from invokeai.backend.image_util.pngwriter import (  # noqa: F401
+    PngWriter,
+    PromptFormatter,
+    retrieve_metadata,
+    write_metadata,
+)
+from invokeai.backend.image_util.util import InitImageResizer, make_grid  # noqa: F401


@@ -2,7 +2,7 @@ import torch
 from torch import nn as nn
 from torch.nn import functional as F
-from .arch_util import default_init_weights, make_layer, pixel_unshuffle
+from invokeai.backend.image_util.basicsr.arch_util import default_init_weights, make_layer, pixel_unshuffle
 class ResidualDenseBlock(nn.Module):


@@ -4,7 +4,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from .blocks import FeatureFusionBlock, _make_scratch
+from invokeai.backend.image_util.depth_anything.model.blocks import FeatureFusionBlock, _make_scratch
 torchhub_path = Path(__file__).parent.parent / "torchhub"


@@ -8,11 +8,10 @@ import numpy as np
 import onnxruntime as ort
 from invokeai.app.services.config.config_default import get_config
+from invokeai.backend.image_util.dw_openpose.onnxdet import inference_detector
+from invokeai.backend.image_util.dw_openpose.onnxpose import inference_pose
 from invokeai.backend.util.devices import TorchDevice
-from .onnxdet import inference_detector
-from .onnxpose import inference_pose
 config = get_config()


@@ -11,9 +11,8 @@ from PIL import Image
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 from invokeai.backend.ip_adapter.ip_attention_weights import IPAttentionWeights
-from ..raw_model import RawModel
-from .resampler import Resampler
+from invokeai.backend.ip_adapter.resampler import Resampler
+from invokeai.backend.raw_model import RawModel
 class IPAdapterStateDict(TypedDict):


@@ -10,10 +10,9 @@ from safetensors.torch import load_file
 from typing_extensions import Self
 from invokeai.backend.model_manager import BaseModelType
+from invokeai.backend.raw_model import RawModel
 from invokeai.backend.util.devices import TorchDevice
-from .raw_model import RawModel
 class LoRALayerBase:
     # rank: Optional[int]


@@ -1,6 +1,6 @@
 """Re-export frequently-used symbols from the Model Manager backend."""
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModel,
     AnyModelConfig,
     BaseModelType,
@@ -13,9 +13,9 @@ from .config import (
     SchedulerPredictionType,
     SubModelType,
 )
-from .load import LoadedModel
-from .probe import ModelProbe
-from .search import ModelSearch
+from invokeai.backend.model_manager.load import LoadedModel
+from invokeai.backend.model_manager.probe import ModelProbe
+from invokeai.backend.model_manager.search import ModelSearch
 __all__ = [
     "AnyModel",


@@ -33,8 +33,7 @@ from typing_extensions import Annotated, Any, Dict
 from invokeai.app.invocations.constants import SCHEDULER_NAME_VALUES
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 # ModelMixin is the base class for all diffusers and transformers models
 # RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime


@@ -6,10 +6,10 @@ Init file for the model loader.
 from importlib import import_module
 from pathlib import Path
-from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
-from .load_default import ModelLoader
-from .model_cache.model_cache_default import ModelCache
-from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
+from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
 # This registers the subclasses that implement loaders of specific model types
 loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"]


@@ -5,7 +5,7 @@ import psutil
 import torch
 from typing_extensions import Self
-from ..util.libc_util import LibcUtil, Struct_mallinfo2
+from invokeai.backend.model_manager.util.libc_util import LibcUtil, Struct_mallinfo2
 GB = 2**30  # 1 GB


@@ -29,13 +29,17 @@ import torch
 from invokeai.backend.model_manager import AnyModel, SubModelType
 from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    CacheStats,
+    ModelCacheBase,
+    ModelLockerBase,
+)
+from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
-from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase
-from .model_locker import ModelLocker
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0


@@ -7,8 +7,11 @@ from typing import Dict, Optional
 import torch
 from invokeai.backend.model_manager import AnyModel
-from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import (
+    CacheRecord,
+    ModelCacheBase,
+    ModelLockerBase,
+)
 class ModelLocker(ModelLockerBase):


@@ -18,7 +18,7 @@ Use like this:
 from abc import ABC, abstractmethod
 from typing import Callable, Dict, Optional, Tuple, Type, TypeVar
-from ..config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ModelConfigBase,
@@ -26,7 +26,7 @@ from ..config import (
     ModelType,
     SubModelType,
 )
-from . import ModelLoaderBase
+from invokeai.backend.model_manager.load import ModelLoaderBase
 class ModelLoaderRegistryBase(ABC):


@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers)


@@ -18,8 +18,8 @@ from invokeai.backend.model_manager import (
     SubModelType,
 )
 from invokeai.backend.model_manager.config import DiffusersConfigBase
-from .. import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)


@@ -15,9 +15,9 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-from .. import ModelLoader, ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)


@@ -13,9 +13,8 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX)


@@ -25,11 +25,10 @@ from invokeai.backend.model_manager.config import (
     DiffusersConfigBase,
     MainCheckpointConfig,
 )
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
 VARIANT_TO_IN_CHANNEL_MAP = {
     ModelVariantType.Normal: 4,
     ModelVariantType.Depth: 5,


@@ -12,10 +12,10 @@ from invokeai.backend.model_manager import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.textual_inversion import TextualInversionModelRaw
-from .. import ModelLoader, ModelLoaderRegistry
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFile)
 @ModelLoaderRegistry.register(


@@ -12,9 +12,8 @@ from invokeai.backend.model_manager import (
     ModelType,
 )
 from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig
-from .. import ModelLoaderRegistry
-from .generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers)


@@ -17,16 +17,10 @@ from diffusers.utils import logging as dlogging
 from invokeai.app.services.model_install import ModelInstallServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
+from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType
+from invokeai.backend.model_manager.config import MainDiffusersConfig
 from invokeai.backend.util.devices import TorchDevice
-from . import (
-    AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-)
-from .config import MainDiffusersConfig
 class MergeInterpolationMethod(str, Enum):
     WeightedSum = "weighted_sum"


@@ -16,8 +16,8 @@ data = HuggingFaceMetadataFetch().from_id("<REPO_ID>")
 assert isinstance(data, HuggingFaceMetadata)
 """
-from .fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
-from .metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     AnyModelRepoMetadataValidator,
     BaseMetadata,


@@ -10,7 +10,7 @@ data = HuggingFaceMetadataFetch().from_id("<repo_id>")
 assert isinstance(data, HuggingFaceMetadata)
 """
-from .fetch_base import ModelMetadataFetchBase
-from .huggingface import HuggingFaceMetadataFetch
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 __all__ = ["ModelMetadataFetchBase", "HuggingFaceMetadataFetch"]


@@ -18,8 +18,11 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 from invokeai.backend.model_manager import ModelRepoVariant
-from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata
+from invokeai.backend.model_manager.metadata.metadata_base import (
+    AnyModelRepoMetadata,
+    AnyModelRepoMetadataValidator,
+    BaseMetadata,
+)
 class ModelMetadataFetchBase(ABC):


@@ -25,14 +25,13 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 from invokeai.backend.model_manager.config import ModelRepoVariant
-from ..metadata_base import (
+from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
+from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     HuggingFaceMetadata,
     RemoteModelFile,
     UnknownMetadataException,
 )
-from .fetch_base import ModelMetadataFetchBase
 HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"


@@ -24,8 +24,7 @@ from requests.sessions import Session
 from typing_extensions import Annotated
 from invokeai.backend.model_manager import ModelRepoVariant
-from ..util import select_hf_files
+from invokeai.backend.model_manager.util.select_hf_files import filter_files
 class UnknownMetadataException(Exception):
@@ -112,9 +111,7 @@ class HuggingFaceMetadata(ModelMetadataWithFiles):
         session = session or Session()
         configure_http_backend(backend_factory=lambda: session)  # used in testing
-        paths = select_hf_files.filter_files(
-            [x.path for x in self.files], variant, subfolder
-        )  # all files in the model
+        paths = filter_files([x.path for x in self.files], variant, subfolder)  # all files in the model
         prefix = f"{subfolder}/" if subfolder else ""
         # the next step reads model_index.json to determine which subdirectories belong
         # to the model


@@ -10,9 +10,7 @@ from picklescan.scanner import scan_file_path
 import invokeai.backend.util.logging as logger
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
-from invokeai.backend.util.silence_warnings import SilenceWarnings
-from .config import (
+from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     ControlAdapterDefaultSettings,
@@ -26,7 +24,8 @@ from .config import (
     ModelVariantType,
     SchedulerPredictionType,
 )
-from .util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
+from invokeai.backend.util.silence_warnings import SilenceWarnings
 CkptType = Dict[str | int, Any]


@@ -17,7 +17,7 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Dict, List, Optional, Set
-from ..config import ModelRepoVariant
+from invokeai.backend.model_manager.config import ModelRepoVariant
 def filter_files(


@@ -13,14 +13,13 @@ from diffusers import OnnxRuntimeModel, UNet2DConditionModel
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
 from invokeai.app.shared.models import FreeUConfig
+from invokeai.backend.lora import LoRAModelRaw
 from invokeai.backend.model_manager import AnyModel
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
+from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
 from invokeai.backend.util.devices import TorchDevice
-from .lora import LoRAModelRaw
-from .textual_inversion import TextualInversionManager, TextualInversionModelRaw
 """
 loras = [
     (lora_model1, 0.7),
@@ -338,7 +337,7 @@ class ONNXModelPatcher:
         loras: List[Tuple[LoRAModelRaw, float]],
         prefix: str,
     ) -> None:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
         if not isinstance(model, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")
@@ -425,7 +424,7 @@ class ONNXModelPatcher:
         text_encoder: IAIOnnxRuntimeModel,
         ti_list: List[Tuple[str, Any]],
     ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
         if not isinstance(text_encoder, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")


@@ -10,7 +10,7 @@ import torch
 from onnx import numpy_helper
 from onnxruntime import InferenceSession, SessionOptions, get_available_providers
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 ONNX_WEIGHTS_NAME = "model.onnx"


@@ -2,9 +2,12 @@
 Initialization file for the invokeai.backend.stable_diffusion package
 """
-from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline  # noqa: F401
-from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
-from .seamless import set_seamless  # noqa: F401
+from invokeai.backend.stable_diffusion.diffusers_pipeline import (  # noqa: F401
+    PipelineIntermediateState,
+    StableDiffusionGeneratorPipeline,
+)
+from invokeai.backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent  # noqa: F401
+from invokeai.backend.stable_diffusion.seamless import set_seamless  # noqa: F401
 __all__ = [
     "PipelineIntermediateState",


@@ -2,4 +2,6 @@
 Initialization file for invokeai.models.diffusion
 """
-from .shared_invokeai_diffusion import InvokeAIDiffuserComponent  # noqa: F401
+from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import (
+    InvokeAIDiffuserComponent,  # noqa: F401
+)


@@ -1,3 +1,3 @@
-from .schedulers import SCHEDULER_MAP  # noqa: F401
+from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_MAP  # noqa: F401
 __all__ = ["SCHEDULER_MAP"]


@@ -9,7 +9,7 @@ from safetensors.torch import load_file
 from transformers import CLIPTokenizer
 from typing_extensions import Self
-from .raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 class TextualInversionModelRaw(RawModel):


@@ -2,8 +2,8 @@
 Initialization file for invokeai.backend.util
 """
-from .logging import InvokeAILogger
-from .util import GIG, Chdir, directory_size
+from invokeai.backend.util.logging import InvokeAILogger
+from invokeai.backend.util.util import GIG, Chdir, directory_size
 __all__ = [
     "GIG",


@@ -2,7 +2,7 @@
 initialization file for invokeai
 """
-from .invokeai_version import __version__  # noqa: F401
+from invokeai.version.invokeai_version import __version__  # noqa: F401
 __app_id__ = "invoke-ai/InvokeAI"
 __app_name__ = "InvokeAI"


@@ -206,7 +206,12 @@ ignore = [
     "B008",  # https://docs.astral.sh/ruff/rules/function-call-in-default-argument/
     "B904",  # https://docs.astral.sh/ruff/rules/raise-without-from-inside-except/
 ]
-select = ["B", "C", "E", "F", "W", "I"]
+select = ["B", "C", "E", "F", "W", "I", "TID"]
+[tool.ruff.lint.flake8-tidy-imports]
+# Disallow all relative imports.
+ban-relative-imports = "all"
 #=== End: Ruff
 #=== Begin: MyPy
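For reference, a minimal sketch of what the new `TID` selection enforces: with `ban-relative-imports = "all"`, ruff's flake8-tidy-imports rule TID252 flags every relative import. The module and symbol below are borrowed from the `names_default.py` hunk above purely for illustration, and the absolute form only resolves when the `invokeai` package is installed.

```python
# Illustrative only -- not a file changed by this PR.

# Relative import: reported as TID252 once the config above lands.
#   from .names_base import NameServiceBase

# Absolute import: the form this PR standardizes on, which the rule accepts.
from invokeai.app.services.names.names_base import NameServiceBase  # noqa: F401
```

Running `ruff check .` should now flag any relative import that sneaks back in.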


@@ -3,13 +3,6 @@ from unittest.mock import Mock
 import pytest
-# This import must happen before other invoke imports or test in other files(!!) break
-from .test_nodes import (  # isort: split
-    PromptCollectionTestInvocation,
-    PromptTestInvocation,
-    TextToImageTestInvocation,
-)
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
 from invokeai.app.invocations.collections import RangeInvocation
 from invokeai.app.invocations.math import AddInvocation, MultiplyInvocation
@@ -20,7 +13,13 @@ from invokeai.app.services.shared.graph import (
     IterateInvocation,
 )
-from .test_nodes import create_edge
+# This import must happen before other invoke imports or test in other files(!!) break
+from tests.test_nodes import (
+    PromptCollectionTestInvocation,
+    PromptTestInvocation,
+    TextToImageTestInvocation,
+    create_edge,
+)
 @pytest.fixture


@@ -27,8 +27,7 @@ from invokeai.app.services.shared.graph import (
     NodeNotFoundError,
     are_connections_compatible,
 )
-from .test_nodes import (
+from tests.test_nodes import (
     AnyTypeTestInvocation,
     ImageToImageTestInvocation,
     ListPassThroughInvocation,


@@ -11,8 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     prepare_values_to_insert,
 )
 from invokeai.app.services.shared.graph import Graph, GraphExecutionState
-from .test_nodes import PromptTestInvocation
+from tests.test_nodes import PromptTestInvocation
 @pytest.fixture