diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 44538ceefd..c9a2f0a843 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -2,33 +2,34 @@ from logging import Logger -from invokeai.app.services.board_image_record_storage import SqliteBoardImageRecordStorage -from invokeai.app.services.board_images import BoardImagesService -from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage -from invokeai.app.services.boards import BoardService -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.image_record_storage import SqliteImageRecordStorage -from invokeai.app.services.images import ImageService -from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache -from invokeai.app.services.resource_name import SimpleNameService -from invokeai.app.services.session_processor.session_processor_default import DefaultSessionProcessor -from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue -from invokeai.app.services.shared.sqlite import SqliteDatabase -from invokeai.app.services.urls import LocalUrlService from invokeai.backend.util.logging import InvokeAILogger from invokeai.version.invokeai_version import __version__ -from ..services.default_graphs import create_system_graphs -from ..services.graph import GraphExecutionState, LibraryGraph -from ..services.image_file_storage import DiskImageFileStorage -from ..services.invocation_queue import MemoryInvocationQueue +from ..services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage +from ..services.board_images.board_images_default import BoardImagesService +from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage +from ..services.boards.boards_default import BoardService +from ..services.config import InvokeAIAppConfig +from ..services.image_files.image_files_disk import 
DiskImageFileStorage +from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage +from ..services.images.images_default import ImageService +from ..services.invocation_cache.invocation_cache_memory import MemoryInvocationCache +from ..services.invocation_processor.invocation_processor_default import DefaultInvocationProcessor +from ..services.invocation_queue.invocation_queue_memory import MemoryInvocationQueue from ..services.invocation_services import InvocationServices -from ..services.invocation_stats import InvocationStatsService +from ..services.invocation_stats.invocation_stats_default import InvocationStatsService from ..services.invoker import Invoker -from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage -from ..services.model_manager_service import ModelManagerService -from ..services.processor import DefaultInvocationProcessor -from ..services.sqlite import SqliteItemStorage +from ..services.item_storage.item_storage_sqlite import SqliteItemStorage +from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage +from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage +from ..services.model_manager.model_manager_default import ModelManagerService +from ..services.names.names_default import SimpleNameService +from ..services.session_processor.session_processor_default import DefaultSessionProcessor +from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue +from ..services.shared.default_graphs import create_system_graphs +from ..services.shared.graph import GraphExecutionState, LibraryGraph +from ..services.shared.sqlite import SqliteDatabase +from ..services.urls.urls_default import LocalUrlService from .events import FastAPIEventService diff --git a/invokeai/app/api/events.py b/invokeai/app/api/events.py index 41414a9230..40dfdb2c71 100644 --- a/invokeai/app/api/events.py +++ b/invokeai/app/api/events.py @@ -7,7 +7,7 @@ from 
typing import Any from fastapi_events.dispatcher import dispatch -from ..services.events import EventServiceBase +from ..services.events.events_base import EventServiceBase class FastAPIEventService(EventServiceBase): diff --git a/invokeai/app/api/routers/boards.py b/invokeai/app/api/routers/boards.py index cc6fbc4e29..69f965da64 100644 --- a/invokeai/app/api/routers/boards.py +++ b/invokeai/app/api/routers/boards.py @@ -4,8 +4,8 @@ from fastapi import Body, HTTPException, Path, Query from fastapi.routing import APIRouter from pydantic import BaseModel, Field -from invokeai.app.services.board_record_storage import BoardChanges -from invokeai.app.services.models.board_record import BoardDTO +from invokeai.app.services.board_records.board_records_common import BoardChanges +from invokeai.app.services.boards.boards_common import BoardDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults from ..dependencies import ApiDependencies diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index faa8eb8bb2..7b61887eb8 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -8,8 +8,8 @@ from PIL import Image from pydantic import BaseModel, Field from invokeai.app.invocations.metadata import ImageMetadata -from invokeai.app.models.image import ImageCategory, ResourceOrigin -from invokeai.app.services.models.image_record import ImageDTO, ImageRecordChanges, ImageUrlsDTO +from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin +from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults from ..dependencies import ApiDependencies diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py index 89329c153b..7ecb0504a3 100644 --- a/invokeai/app/api/routers/session_queue.py +++ 
b/invokeai/app/api/routers/session_queue.py @@ -18,9 +18,9 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueItemDTO, SessionQueueStatus, ) +from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults -from ...services.graph import Graph from ..dependencies import ApiDependencies session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"]) diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py index 31a7b952a0..cd93a267ad 100644 --- a/invokeai/app/api/routers/sessions.py +++ b/invokeai/app/api/routers/sessions.py @@ -11,7 +11,7 @@ from invokeai.app.services.shared.pagination import PaginatedResults # Importing * is bad karma but needed here for node detection from ...invocations import * # noqa: F401 F403 from ...invocations.baseinvocation import BaseInvocation -from ...services.graph import Edge, EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError +from ...services.shared.graph import Edge, EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError from ..dependencies import ApiDependencies session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index ae699f35ef..f41c38786c 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -5,7 +5,7 @@ from fastapi_events.handlers.local import local_handler from fastapi_events.typing import Event from socketio import ASGIApp, AsyncServer -from ..services.events import EventServiceBase +from ..services.events.events_base import EventServiceBase class SocketIO: diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 497dafa102..d82b94d0e9 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -28,7 +28,7 @@ from pydantic import BaseModel, Field, 
validator from pydantic.fields import ModelField, Undefined from pydantic.typing import NoArgAnyCallable -from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig +from invokeai.app.services.config.config_default import InvokeAIAppConfig if TYPE_CHECKING: from ..services.invocation_services import InvocationServices diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 933c32c908..59a36935df 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -27,9 +27,9 @@ from PIL import Image from pydantic import BaseModel, Field, validator from invokeai.app.invocations.primitives import ImageField, ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from ...backend.model_management import BaseModelType -from ..models.image import ImageCategory, ResourceOrigin from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py index cbe76091d6..3b85955d74 100644 --- a/invokeai/app/invocations/cv.py +++ b/invokeai/app/invocations/cv.py @@ -6,7 +6,7 @@ import numpy from PIL import Image, ImageOps from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.models.image import ImageCategory, ResourceOrigin +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 0301768219..2d59a567c0 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -9,10 +9,10 @@ from PIL import Image, ImageChops, ImageFilter, ImageOps from invokeai.app.invocations.metadata import CoreMetadata from 
invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark from invokeai.backend.image_util.safety_checker import SafetyChecker -from ..models.image import ImageCategory, ResourceOrigin from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, invocation diff --git a/invokeai/app/invocations/infill.py b/invokeai/app/invocations/infill.py index e703b4ab41..d8384290f3 100644 --- a/invokeai/app/invocations/infill.py +++ b/invokeai/app/invocations/infill.py @@ -7,12 +7,12 @@ import numpy as np from PIL import Image, ImageOps from invokeai.app.invocations.primitives import ColorField, ImageField, ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.util.misc import SEED_MAX, get_random_seed from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint from invokeai.backend.image_util.lama import LaMA from invokeai.backend.image_util.patchmatch import PatchMatch -from ..models.image import ImageCategory, ResourceOrigin from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index c6bf37bdbc..7ca8cbbe6c 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -34,6 +34,7 @@ from invokeai.app.invocations.primitives import ( build_latents_output, ) from invokeai.app.invocations.t2i_adapter import T2IAdapterField +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import 
stable_diffusion_step_callback from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus @@ -54,7 +55,6 @@ from ...backend.stable_diffusion.diffusers_pipeline import ( from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP from ...backend.util.devices import choose_precision, choose_torch_device -from ..models.image import ImageCategory, ResourceOrigin from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index 1d531d45a2..35f8ed965e 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@ -14,13 +14,13 @@ from tqdm import tqdm from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend import BaseModelType, ModelType, SubModelType from ...backend.model_management import ONNXModelPatcher from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.util import choose_torch_device -from ..models.image import ImageCategory, ResourceOrigin from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index a1f3d2691a..e26c1b9084 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -10,7 +10,7 @@ from PIL import Image from realesrgan import RealESRGANer from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.models.image import ImageCategory, ResourceOrigin +from invokeai.app.services.image_records.image_records_common import 
ImageCategory, ResourceOrigin from invokeai.backend.util.devices import choose_torch_device from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation diff --git a/invokeai/app/models/exceptions.py b/invokeai/app/models/exceptions.py deleted file mode 100644 index 662e1948ce..0000000000 --- a/invokeai/app/models/exceptions.py +++ /dev/null @@ -1,4 +0,0 @@ -class CanceledException(Exception): - """Execution canceled by user.""" - - pass diff --git a/invokeai/app/models/image.py b/invokeai/app/models/image.py deleted file mode 100644 index 88cf8af5f9..0000000000 --- a/invokeai/app/models/image.py +++ /dev/null @@ -1,71 +0,0 @@ -from enum import Enum - -from pydantic import BaseModel, Field - -from invokeai.app.util.metaenum import MetaEnum - - -class ProgressImage(BaseModel): - """The progress image sent intermittently during processing""" - - width: int = Field(description="The effective width of the image in pixels") - height: int = Field(description="The effective height of the image in pixels") - dataURL: str = Field(description="The image data as a b64 data URL") - - -class ResourceOrigin(str, Enum, metaclass=MetaEnum): - """The origin of a resource (eg image). - - - INTERNAL: The resource was created by the application. - - EXTERNAL: The resource was not created by the application. - This may be a user-initiated upload, or an internal application upload (eg Canvas init image). - """ - - INTERNAL = "internal" - """The resource was created by the application.""" - EXTERNAL = "external" - """The resource was not created by the application. - This may be a user-initiated upload, or an internal application upload (eg Canvas init image). - """ - - -class InvalidOriginException(ValueError): - """Raised when a provided value is not a valid ResourceOrigin. - - Subclasses `ValueError`. 
- """ - - def __init__(self, message="Invalid resource origin."): - super().__init__(message) - - -class ImageCategory(str, Enum, metaclass=MetaEnum): - """The category of an image. - - - GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose. - - MASK: The image is a mask image. - - CONTROL: The image is a ControlNet control image. - - USER: The image is a user-provide image. - - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes. - """ - - GENERAL = "general" - """GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose.""" - MASK = "mask" - """MASK: The image is a mask image.""" - CONTROL = "control" - """CONTROL: The image is a ControlNet control image.""" - USER = "user" - """USER: The image is a user-provide image.""" - OTHER = "other" - """OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes.""" - - -class InvalidImageCategoryException(ValueError): - """Raised when a provided value is not a valid ImageCategory. - - Subclasses `ValueError`. 
- """ - - def __init__(self, message="Invalid image category."): - super().__init__(message) diff --git a/invokeai/app/models/__init__.py b/invokeai/app/services/board_image_records/__init__.py similarity index 100% rename from invokeai/app/models/__init__.py rename to invokeai/app/services/board_image_records/__init__.py diff --git a/invokeai/app/services/board_image_records/board_image_records_base.py b/invokeai/app/services/board_image_records/board_image_records_base.py new file mode 100644 index 0000000000..c8f7b35908 --- /dev/null +++ b/invokeai/app/services/board_image_records/board_image_records_base.py @@ -0,0 +1,47 @@ +from abc import ABC, abstractmethod +from typing import Optional + + +class BoardImageRecordStorageBase(ABC): + """Abstract base class for the one-to-many board-image relationship record storage.""" + + @abstractmethod + def add_image_to_board( + self, + board_id: str, + image_name: str, + ) -> None: + """Adds an image to a board.""" + pass + + @abstractmethod + def remove_image_from_board( + self, + image_name: str, + ) -> None: + """Removes an image from a board.""" + pass + + @abstractmethod + def get_all_board_image_names_for_board( + self, + board_id: str, + ) -> list[str]: + """Gets all board images for a board, as a list of the image names.""" + pass + + @abstractmethod + def get_board_for_image( + self, + image_name: str, + ) -> Optional[str]: + """Gets an image's board id, if it has one.""" + pass + + @abstractmethod + def get_image_count_for_board( + self, + board_id: str, + ) -> int: + """Gets the number of images for a board.""" + pass diff --git a/invokeai/app/services/board_image_record_storage.py b/invokeai/app/services/board_image_records/board_image_records_sqlite.py similarity index 85% rename from invokeai/app/services/board_image_record_storage.py rename to invokeai/app/services/board_image_records/board_image_records_sqlite.py index 63d09b45fb..df7505b797 100644 --- a/invokeai/app/services/board_image_record_storage.py 
+++ b/invokeai/app/services/board_image_records/board_image_records_sqlite.py @@ -1,56 +1,12 @@ import sqlite3 import threading -from abc import ABC, abstractmethod from typing import Optional, cast -from invokeai.app.services.models.image_record import ImageRecord, deserialize_image_record -from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.app.services.image_records.image_records_common import ImageRecord, deserialize_image_record from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite import SqliteDatabase - -class BoardImageRecordStorageBase(ABC): - """Abstract base class for the one-to-many board-image relationship record storage.""" - - @abstractmethod - def add_image_to_board( - self, - board_id: str, - image_name: str, - ) -> None: - """Adds an image to a board.""" - pass - - @abstractmethod - def remove_image_from_board( - self, - image_name: str, - ) -> None: - """Removes an image from a board.""" - pass - - @abstractmethod - def get_all_board_image_names_for_board( - self, - board_id: str, - ) -> list[str]: - """Gets all board images for a board, as a list of the image names.""" - pass - - @abstractmethod - def get_board_for_image( - self, - image_name: str, - ) -> Optional[str]: - """Gets an image's board id, if it has one.""" - pass - - @abstractmethod - def get_image_count_for_board( - self, - board_id: str, - ) -> int: - """Gets the number of images for a board.""" - pass +from .board_image_records_base import BoardImageRecordStorageBase class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase): diff --git a/invokeai/app/services/board_images.py b/invokeai/app/services/board_images.py deleted file mode 100644 index 1cbc026dc9..0000000000 --- a/invokeai/app/services/board_images.py +++ /dev/null @@ -1,85 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from invokeai.app.services.board_record_storage import BoardRecord -from 
invokeai.app.services.invoker import Invoker -from invokeai.app.services.models.board_record import BoardDTO - - -class BoardImagesServiceABC(ABC): - """High-level service for board-image relationship management.""" - - @abstractmethod - def add_image_to_board( - self, - board_id: str, - image_name: str, - ) -> None: - """Adds an image to a board.""" - pass - - @abstractmethod - def remove_image_from_board( - self, - image_name: str, - ) -> None: - """Removes an image from a board.""" - pass - - @abstractmethod - def get_all_board_image_names_for_board( - self, - board_id: str, - ) -> list[str]: - """Gets all board images for a board, as a list of the image names.""" - pass - - @abstractmethod - def get_board_for_image( - self, - image_name: str, - ) -> Optional[str]: - """Gets an image's board id, if it has one.""" - pass - - -class BoardImagesService(BoardImagesServiceABC): - __invoker: Invoker - - def start(self, invoker: Invoker) -> None: - self.__invoker = invoker - - def add_image_to_board( - self, - board_id: str, - image_name: str, - ) -> None: - self.__invoker.services.board_image_records.add_image_to_board(board_id, image_name) - - def remove_image_from_board( - self, - image_name: str, - ) -> None: - self.__invoker.services.board_image_records.remove_image_from_board(image_name) - - def get_all_board_image_names_for_board( - self, - board_id: str, - ) -> list[str]: - return self.__invoker.services.board_image_records.get_all_board_image_names_for_board(board_id) - - def get_board_for_image( - self, - image_name: str, - ) -> Optional[str]: - board_id = self.__invoker.services.board_image_records.get_board_for_image(image_name) - return board_id - - -def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO: - """Converts a board record to a board DTO.""" - return BoardDTO( - **board_record.dict(exclude={"cover_image_name"}), - cover_image_name=cover_image_name, - image_count=image_count, - ) diff 
--git a/invokeai/app/services/board_images/__init__.py b/invokeai/app/services/board_images/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/board_images/board_images_base.py b/invokeai/app/services/board_images/board_images_base.py new file mode 100644 index 0000000000..356ff7068b --- /dev/null +++ b/invokeai/app/services/board_images/board_images_base.py @@ -0,0 +1,39 @@ +from abc import ABC, abstractmethod +from typing import Optional + + +class BoardImagesServiceABC(ABC): + """High-level service for board-image relationship management.""" + + @abstractmethod + def add_image_to_board( + self, + board_id: str, + image_name: str, + ) -> None: + """Adds an image to a board.""" + pass + + @abstractmethod + def remove_image_from_board( + self, + image_name: str, + ) -> None: + """Removes an image from a board.""" + pass + + @abstractmethod + def get_all_board_image_names_for_board( + self, + board_id: str, + ) -> list[str]: + """Gets all board images for a board, as a list of the image names.""" + pass + + @abstractmethod + def get_board_for_image( + self, + image_name: str, + ) -> Optional[str]: + """Gets an image's board id, if it has one.""" + pass diff --git a/invokeai/app/services/models/board_image.py b/invokeai/app/services/board_images/board_images_common.py similarity index 100% rename from invokeai/app/services/models/board_image.py rename to invokeai/app/services/board_images/board_images_common.py diff --git a/invokeai/app/services/board_images/board_images_default.py b/invokeai/app/services/board_images/board_images_default.py new file mode 100644 index 0000000000..85e478619c --- /dev/null +++ b/invokeai/app/services/board_images/board_images_default.py @@ -0,0 +1,38 @@ +from typing import Optional + +from invokeai.app.services.invoker import Invoker + +from .board_images_base import BoardImagesServiceABC + + +class BoardImagesService(BoardImagesServiceABC): + __invoker: Invoker + + def start(self, invoker: 
Invoker) -> None: + self.__invoker = invoker + + def add_image_to_board( + self, + board_id: str, + image_name: str, + ) -> None: + self.__invoker.services.board_image_records.add_image_to_board(board_id, image_name) + + def remove_image_from_board( + self, + image_name: str, + ) -> None: + self.__invoker.services.board_image_records.remove_image_from_board(image_name) + + def get_all_board_image_names_for_board( + self, + board_id: str, + ) -> list[str]: + return self.__invoker.services.board_image_records.get_all_board_image_names_for_board(board_id) + + def get_board_for_image( + self, + image_name: str, + ) -> Optional[str]: + board_id = self.__invoker.services.board_image_records.get_board_for_image(image_name) + return board_id diff --git a/invokeai/app/services/board_records/board_records_base.py b/invokeai/app/services/board_records/board_records_base.py new file mode 100644 index 0000000000..30f819618a --- /dev/null +++ b/invokeai/app/services/board_records/board_records_base.py @@ -0,0 +1,55 @@ +from abc import ABC, abstractmethod + +from invokeai.app.services.shared.pagination import OffsetPaginatedResults + +from .board_records_common import BoardChanges, BoardRecord + + +class BoardRecordStorageBase(ABC): + """Low-level service responsible for interfacing with the board record store.""" + + @abstractmethod + def delete(self, board_id: str) -> None: + """Deletes a board record.""" + pass + + @abstractmethod + def save( + self, + board_name: str, + ) -> BoardRecord: + """Saves a board record.""" + pass + + @abstractmethod + def get( + self, + board_id: str, + ) -> BoardRecord: + """Gets a board record.""" + pass + + @abstractmethod + def update( + self, + board_id: str, + changes: BoardChanges, + ) -> BoardRecord: + """Updates a board record.""" + pass + + @abstractmethod + def get_many( + self, + offset: int = 0, + limit: int = 10, + ) -> OffsetPaginatedResults[BoardRecord]: + """Gets many board records.""" + pass + + @abstractmethod + def get_all( + 
self, + ) -> list[BoardRecord]: + """Gets all board records.""" + pass diff --git a/invokeai/app/services/models/board_record.py b/invokeai/app/services/board_records/board_records_common.py similarity index 70% rename from invokeai/app/services/models/board_record.py rename to invokeai/app/services/board_records/board_records_common.py index 4b93d0ea23..e0264dde0d 100644 --- a/invokeai/app/services/models/board_record.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Optional, Union -from pydantic import Field +from pydantic import BaseModel, Extra, Field from invokeai.app.util.misc import get_iso_timestamp from invokeai.app.util.model_exclude_null import BaseModelExcludeNull @@ -24,15 +24,6 @@ class BoardRecord(BaseModelExcludeNull): """The name of the cover image of the board.""" -class BoardDTO(BoardRecord): - """Deserialized board record with cover image URL and image count.""" - - cover_image_name: Optional[str] = Field(description="The name of the board's cover image.") - """The URL of the thumbnail of the most recent image in the board.""" - image_count: int = Field(description="The number of images in the board.") - """The number of images in the board.""" - - def deserialize_board_record(board_dict: dict) -> BoardRecord: """Deserializes a board record.""" @@ -53,3 +44,29 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: updated_at=updated_at, deleted_at=deleted_at, ) + + +class BoardChanges(BaseModel, extra=Extra.forbid): + board_name: Optional[str] = Field(description="The board's new name.") + cover_image_name: Optional[str] = Field(description="The name of the board's new cover image.") + + +class BoardRecordNotFoundException(Exception): + """Raised when an board record is not found.""" + + def __init__(self, message="Board record not found"): + super().__init__(message) + + +class BoardRecordSaveException(Exception): + """Raised when an board record 
cannot be saved.""" + + def __init__(self, message="Board record not saved"): + super().__init__(message) + + +class BoardRecordDeleteException(Exception): + """Raised when an board record cannot be deleted.""" + + def __init__(self, message="Board record not deleted"): + super().__init__(message) diff --git a/invokeai/app/services/board_record_storage.py b/invokeai/app/services/board_records/board_records_sqlite.py similarity index 77% rename from invokeai/app/services/board_record_storage.py rename to invokeai/app/services/board_records/board_records_sqlite.py index dca549cd23..b2ddc931f5 100644 --- a/invokeai/app/services/board_record_storage.py +++ b/invokeai/app/services/board_records/board_records_sqlite.py @@ -1,90 +1,20 @@ import sqlite3 import threading -from abc import ABC, abstractmethod -from typing import Optional, Union, cast +from typing import Union, cast -from pydantic import BaseModel, Extra, Field - -from invokeai.app.services.models.board_record import BoardRecord, deserialize_board_record -from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.util.misc import uuid_string - -class BoardChanges(BaseModel, extra=Extra.forbid): - board_name: Optional[str] = Field(description="The board's new name.") - cover_image_name: Optional[str] = Field(description="The name of the board's new cover image.") - - -class BoardRecordNotFoundException(Exception): - """Raised when an board record is not found.""" - - def __init__(self, message="Board record not found"): - super().__init__(message) - - -class BoardRecordSaveException(Exception): - """Raised when an board record cannot be saved.""" - - def __init__(self, message="Board record not saved"): - super().__init__(message) - - -class BoardRecordDeleteException(Exception): - """Raised when an board record cannot be deleted.""" - - def 
__init__(self, message="Board record not deleted"): - super().__init__(message) - - -class BoardRecordStorageBase(ABC): - """Low-level service responsible for interfacing with the board record store.""" - - @abstractmethod - def delete(self, board_id: str) -> None: - """Deletes a board record.""" - pass - - @abstractmethod - def save( - self, - board_name: str, - ) -> BoardRecord: - """Saves a board record.""" - pass - - @abstractmethod - def get( - self, - board_id: str, - ) -> BoardRecord: - """Gets a board record.""" - pass - - @abstractmethod - def update( - self, - board_id: str, - changes: BoardChanges, - ) -> BoardRecord: - """Updates a board record.""" - pass - - @abstractmethod - def get_many( - self, - offset: int = 0, - limit: int = 10, - ) -> OffsetPaginatedResults[BoardRecord]: - """Gets many board records.""" - pass - - @abstractmethod - def get_all( - self, - ) -> list[BoardRecord]: - """Gets all board records.""" - pass +from .board_records_base import BoardRecordStorageBase +from .board_records_common import ( + BoardChanges, + BoardRecord, + BoardRecordDeleteException, + BoardRecordNotFoundException, + BoardRecordSaveException, + deserialize_board_record, +) class SqliteBoardRecordStorage(BoardRecordStorageBase): diff --git a/invokeai/app/services/boards/__init__.py b/invokeai/app/services/boards/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/boards/boards_base.py b/invokeai/app/services/boards/boards_base.py new file mode 100644 index 0000000000..6f90334d53 --- /dev/null +++ b/invokeai/app/services/boards/boards_base.py @@ -0,0 +1,59 @@ +from abc import ABC, abstractmethod + +from invokeai.app.services.board_records.board_records_common import BoardChanges +from invokeai.app.services.shared.pagination import OffsetPaginatedResults + +from .boards_common import BoardDTO + + +class BoardServiceABC(ABC): + """High-level service for board management.""" + + @abstractmethod + def create( + self, + 
board_name: str, + ) -> BoardDTO: + """Creates a board.""" + pass + + @abstractmethod + def get_dto( + self, + board_id: str, + ) -> BoardDTO: + """Gets a board.""" + pass + + @abstractmethod + def update( + self, + board_id: str, + changes: BoardChanges, + ) -> BoardDTO: + """Updates a board.""" + pass + + @abstractmethod + def delete( + self, + board_id: str, + ) -> None: + """Deletes a board.""" + pass + + @abstractmethod + def get_many( + self, + offset: int = 0, + limit: int = 10, + ) -> OffsetPaginatedResults[BoardDTO]: + """Gets many boards.""" + pass + + @abstractmethod + def get_all( + self, + ) -> list[BoardDTO]: + """Gets all boards.""" + pass diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py new file mode 100644 index 0000000000..e22e1915fe --- /dev/null +++ b/invokeai/app/services/boards/boards_common.py @@ -0,0 +1,23 @@ +from typing import Optional + +from pydantic import Field + +from ..board_records.board_records_common import BoardRecord + + +class BoardDTO(BoardRecord): + """Deserialized board record with cover image URL and image count.""" + + cover_image_name: Optional[str] = Field(description="The name of the board's cover image.") + """The URL of the thumbnail of the most recent image in the board.""" + image_count: int = Field(description="The number of images in the board.") + """The number of images in the board.""" + + +def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO: + """Converts a board record to a board DTO.""" + return BoardDTO( + **board_record.dict(exclude={"cover_image_name"}), + cover_image_name=cover_image_name, + image_count=image_count, + ) diff --git a/invokeai/app/services/boards.py b/invokeai/app/services/boards/boards_default.py similarity index 72% rename from invokeai/app/services/boards.py rename to invokeai/app/services/boards/boards_default.py index 8b6f70d3e3..5b37d6c7ad 100644 --- 
a/invokeai/app/services/boards.py +++ b/invokeai/app/services/boards/boards_default.py @@ -1,63 +1,10 @@ -from abc import ABC, abstractmethod - -from invokeai.app.services.board_images import board_record_to_dto -from invokeai.app.services.board_record_storage import BoardChanges +from invokeai.app.services.board_records.board_records_common import BoardChanges +from invokeai.app.services.boards.boards_common import BoardDTO from invokeai.app.services.invoker import Invoker -from invokeai.app.services.models.board_record import BoardDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults - -class BoardServiceABC(ABC): - """High-level service for board management.""" - - @abstractmethod - def create( - self, - board_name: str, - ) -> BoardDTO: - """Creates a board.""" - pass - - @abstractmethod - def get_dto( - self, - board_id: str, - ) -> BoardDTO: - """Gets a board.""" - pass - - @abstractmethod - def update( - self, - board_id: str, - changes: BoardChanges, - ) -> BoardDTO: - """Updates a board.""" - pass - - @abstractmethod - def delete( - self, - board_id: str, - ) -> None: - """Deletes a board.""" - pass - - @abstractmethod - def get_many( - self, - offset: int = 0, - limit: int = 10, - ) -> OffsetPaginatedResults[BoardDTO]: - """Gets many boards.""" - pass - - @abstractmethod - def get_all( - self, - ) -> list[BoardDTO]: - """Gets all boards.""" - pass +from .boards_base import BoardServiceABC +from .boards_common import board_record_to_dto class BoardService(BoardServiceABC): diff --git a/invokeai/app/services/config/__init__.py b/invokeai/app/services/config/__init__.py index a404f33638..b9a92b03d2 100644 --- a/invokeai/app/services/config/__init__.py +++ b/invokeai/app/services/config/__init__.py @@ -2,5 +2,5 @@ Init file for InvokeAI configure package """ -from .base import PagingArgumentParser # noqa F401 -from .invokeai_config import InvokeAIAppConfig, get_invokeai_config # noqa F401 +from .config_base import PagingArgumentParser 
# noqa F401 +from .config_default import InvokeAIAppConfig, get_invokeai_config # noqa F401 diff --git a/invokeai/app/services/config/base.py b/invokeai/app/services/config/config_base.py similarity index 92% rename from invokeai/app/services/config/base.py rename to invokeai/app/services/config/config_base.py index f24879af05..a07e14252a 100644 --- a/invokeai/app/services/config/base.py +++ b/invokeai/app/services/config/config_base.py @@ -12,7 +12,6 @@ from __future__ import annotations import argparse import os -import pydoc import sys from argparse import ArgumentParser from pathlib import Path @@ -21,16 +20,7 @@ from typing import ClassVar, Dict, List, Literal, Optional, Union, get_args, get from omegaconf import DictConfig, ListConfig, OmegaConf from pydantic import BaseSettings - -class PagingArgumentParser(argparse.ArgumentParser): - """ - A custom ArgumentParser that uses pydoc to page its output. - It also supports reading defaults from an init file. - """ - - def print_help(self, file=None): - text = self.format_help() - pydoc.pager(text) +from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str class InvokeAISettings(BaseSettings): @@ -223,18 +213,3 @@ class InvokeAISettings(BaseSettings): action=argparse.BooleanOptionalAction if field.type_ == bool else "store", help=field.field_info.description, ) - - -def int_or_float_or_str(value: str) -> Union[int, float, str]: - """ - Workaround for argparse type checking. 
- """ - try: - return int(value) - except Exception as e: # noqa F841 - pass - try: - return float(value) - except Exception as e: # noqa F841 - pass - return str(value) diff --git a/invokeai/app/services/config/config_common.py b/invokeai/app/services/config/config_common.py new file mode 100644 index 0000000000..d11bcabcf9 --- /dev/null +++ b/invokeai/app/services/config/config_common.py @@ -0,0 +1,41 @@ +# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team + +""" +Base class for the InvokeAI configuration system. +It defines a type of pydantic BaseSettings object that +is able to read and write from an omegaconf-based config file, +with overriding of settings from environment variables and/or +the command line. +""" + +from __future__ import annotations + +import argparse +import pydoc +from typing import Union + + +class PagingArgumentParser(argparse.ArgumentParser): + """ + A custom ArgumentParser that uses pydoc to page its output. + It also supports reading defaults from an init file. + """ + + def print_help(self, file=None): + text = self.format_help() + pydoc.pager(text) + + +def int_or_float_or_str(value: str) -> Union[int, float, str]: + """ + Workaround for argparse type checking. 
+ """ + try: + return int(value) + except Exception as e: # noqa F841 + pass + try: + return float(value) + except Exception as e: # noqa F841 + pass + return str(value) diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/config_default.py similarity index 99% rename from invokeai/app/services/config/invokeai_config.py rename to invokeai/app/services/config/config_default.py index d8b598815d..87e24bcbc0 100644 --- a/invokeai/app/services/config/invokeai_config.py +++ b/invokeai/app/services/config/config_default.py @@ -177,7 +177,7 @@ from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hint from omegaconf import DictConfig, OmegaConf from pydantic import Field, parse_obj_as -from .base import InvokeAISettings +from .config_base import InvokeAISettings INIT_FILE = Path("invokeai.yaml") DB_FILE = Path("invokeai.db") diff --git a/invokeai/app/services/events/__init__.py b/invokeai/app/services/events/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events/events_base.py similarity index 97% rename from invokeai/app/services/events.py rename to invokeai/app/services/events/events_base.py index 0a02a03539..8685db3717 100644 --- a/invokeai/app/services/events.py +++ b/invokeai/app/services/events/events_base.py @@ -2,8 +2,8 @@ from typing import Any, Optional -from invokeai.app.models.image import ProgressImage -from invokeai.app.services.model_manager_service import BaseModelType, ModelInfo, ModelType, SubModelType +from invokeai.app.invocations.model import ModelInfo +from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage from invokeai.app.services.session_queue.session_queue_common import ( BatchStatus, EnqueueBatchResult, @@ -11,6 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueStatus, ) from invokeai.app.util.misc import get_timestamp 
+from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType class EventServiceBase: diff --git a/invokeai/app/services/image_files/__init__.py b/invokeai/app/services/image_files/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py new file mode 100644 index 0000000000..d998f9024b --- /dev/null +++ b/invokeai/app/services/image_files/image_files_base.py @@ -0,0 +1,42 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from PIL.Image import Image as PILImageType + + +class ImageFileStorageBase(ABC): + """Low-level service responsible for storing and retrieving image files.""" + + @abstractmethod + def get(self, image_name: str) -> PILImageType: + """Retrieves an image as PIL Image.""" + pass + + @abstractmethod + def get_path(self, image_name: str, thumbnail: bool = False) -> str: + """Gets the internal path to an image or thumbnail.""" + pass + + # TODO: We need to validate paths before starlette makes the FileResponse, else we get a + # 500 internal server error. I don't like having this method on the service. + @abstractmethod + def validate_path(self, path: str) -> bool: + """Validates the path given for an image or thumbnail.""" + pass + + @abstractmethod + def save( + self, + image: PILImageType, + image_name: str, + metadata: Optional[dict] = None, + workflow: Optional[str] = None, + thumbnail_size: int = 256, + ) -> None: + """Saves an image and a 256x256 WEBP thumbnail. 
Returns a tuple of the image name, thumbnail name, and created timestamp.""" + pass + + @abstractmethod + def delete(self, image_name: str) -> None: + """Deletes an image and its thumbnail (if one exists).""" + pass diff --git a/invokeai/app/services/image_files/image_files_common.py b/invokeai/app/services/image_files/image_files_common.py new file mode 100644 index 0000000000..e9cc2a3fa7 --- /dev/null +++ b/invokeai/app/services/image_files/image_files_common.py @@ -0,0 +1,20 @@ +# TODO: Should these excpetions subclass existing python exceptions? +class ImageFileNotFoundException(Exception): + """Raised when an image file is not found in storage.""" + + def __init__(self, message="Image file not found"): + super().__init__(message) + + +class ImageFileSaveException(Exception): + """Raised when an image cannot be saved.""" + + def __init__(self, message="Image file not saved"): + super().__init__(message) + + +class ImageFileDeleteException(Exception): + """Raised when an image cannot be deleted.""" + + def __init__(self, message="Image file not deleted"): + super().__init__(message) diff --git a/invokeai/app/services/image_file_storage.py b/invokeai/app/services/image_files/image_files_disk.py similarity index 74% rename from invokeai/app/services/image_file_storage.py rename to invokeai/app/services/image_files/image_files_disk.py index 7f32671a8c..d6d55ff39f 100644 --- a/invokeai/app/services/image_file_storage.py +++ b/invokeai/app/services/image_files/image_files_disk.py @@ -1,6 +1,5 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team import json -from abc import ABC, abstractmethod from pathlib import Path from queue import Queue from typing import Dict, Optional, Union @@ -12,65 +11,8 @@ from send2trash import send2trash from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail - -# TODO: Should these excpetions subclass 
existing python exceptions? -class ImageFileNotFoundException(Exception): - """Raised when an image file is not found in storage.""" - - def __init__(self, message="Image file not found"): - super().__init__(message) - - -class ImageFileSaveException(Exception): - """Raised when an image cannot be saved.""" - - def __init__(self, message="Image file not saved"): - super().__init__(message) - - -class ImageFileDeleteException(Exception): - """Raised when an image cannot be deleted.""" - - def __init__(self, message="Image file not deleted"): - super().__init__(message) - - -class ImageFileStorageBase(ABC): - """Low-level service responsible for storing and retrieving image files.""" - - @abstractmethod - def get(self, image_name: str) -> PILImageType: - """Retrieves an image as PIL Image.""" - pass - - @abstractmethod - def get_path(self, image_name: str, thumbnail: bool = False) -> str: - """Gets the internal path to an image or thumbnail.""" - pass - - # TODO: We need to validate paths before starlette makes the FileResponse, else we get a - # 500 internal server error. I don't like having this method on the service. - @abstractmethod - def validate_path(self, path: str) -> bool: - """Validates the path given for an image or thumbnail.""" - pass - - @abstractmethod - def save( - self, - image: PILImageType, - image_name: str, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, - thumbnail_size: int = 256, - ) -> None: - """Saves an image and a 256x256 WEBP thumbnail. 
Returns a tuple of the image name, thumbnail name, and created timestamp.""" - pass - - @abstractmethod - def delete(self, image_name: str) -> None: - """Deletes an image and its thumbnail (if one exists).""" - pass +from .image_files_base import ImageFileStorageBase +from .image_files_common import ImageFileDeleteException, ImageFileNotFoundException, ImageFileSaveException class DiskImageFileStorage(ImageFileStorageBase): diff --git a/invokeai/app/services/image_records/__init__.py b/invokeai/app/services/image_records/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py new file mode 100644 index 0000000000..58db6feb23 --- /dev/null +++ b/invokeai/app/services/image_records/image_records_base.py @@ -0,0 +1,84 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Optional + +from invokeai.app.services.shared.pagination import OffsetPaginatedResults + +from .image_records_common import ImageCategory, ImageRecord, ImageRecordChanges, ResourceOrigin + + +class ImageRecordStorageBase(ABC): + """Low-level service responsible for interfacing with the image record store.""" + + # TODO: Implement an `update()` method + + @abstractmethod + def get(self, image_name: str) -> ImageRecord: + """Gets an image record.""" + pass + + @abstractmethod + def get_metadata(self, image_name: str) -> Optional[dict]: + """Gets an image's metadata'.""" + pass + + @abstractmethod + def update( + self, + image_name: str, + changes: ImageRecordChanges, + ) -> None: + """Updates an image record.""" + pass + + @abstractmethod + def get_many( + self, + offset: Optional[int] = None, + limit: Optional[int] = None, + image_origin: Optional[ResourceOrigin] = None, + categories: Optional[list[ImageCategory]] = None, + is_intermediate: Optional[bool] = None, + board_id: Optional[str] = None, + ) -> 
OffsetPaginatedResults[ImageRecord]: + """Gets a page of image records.""" + pass + + # TODO: The database has a nullable `deleted_at` column, currently unused. + # Should we implement soft deletes? Would need coordination with ImageFileStorage. + @abstractmethod + def delete(self, image_name: str) -> None: + """Deletes an image record.""" + pass + + @abstractmethod + def delete_many(self, image_names: list[str]) -> None: + """Deletes many image records.""" + pass + + @abstractmethod + def delete_intermediates(self) -> list[str]: + """Deletes all intermediate image records, returning a list of deleted image names.""" + pass + + @abstractmethod + def save( + self, + image_name: str, + image_origin: ResourceOrigin, + image_category: ImageCategory, + width: int, + height: int, + session_id: Optional[str], + node_id: Optional[str], + metadata: Optional[dict], + is_intermediate: bool = False, + starred: bool = False, + ) -> datetime: + """Saves an image record.""" + pass + + @abstractmethod + def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]: + """Gets the most recent image for a board.""" + pass diff --git a/invokeai/app/services/models/image_record.py b/invokeai/app/services/image_records/image_records_common.py similarity index 59% rename from invokeai/app/services/models/image_record.py rename to invokeai/app/services/image_records/image_records_common.py index 3b215f5b88..39fac92048 100644 --- a/invokeai/app/services/models/image_record.py +++ b/invokeai/app/services/image_records/image_records_common.py @@ -1,13 +1,117 @@ +# TODO: Should these excpetions subclass existing python exceptions? 
import datetime +from enum import Enum from typing import Optional, Union from pydantic import Extra, Field, StrictBool, StrictStr -from invokeai.app.models.image import ImageCategory, ResourceOrigin +from invokeai.app.util.metaenum import MetaEnum from invokeai.app.util.misc import get_iso_timestamp from invokeai.app.util.model_exclude_null import BaseModelExcludeNull +class ResourceOrigin(str, Enum, metaclass=MetaEnum): + """The origin of a resource (eg image). + + - INTERNAL: The resource was created by the application. + - EXTERNAL: The resource was not created by the application. + This may be a user-initiated upload, or an internal application upload (eg Canvas init image). + """ + + INTERNAL = "internal" + """The resource was created by the application.""" + EXTERNAL = "external" + """The resource was not created by the application. + This may be a user-initiated upload, or an internal application upload (eg Canvas init image). + """ + + +class InvalidOriginException(ValueError): + """Raised when a provided value is not a valid ResourceOrigin. + + Subclasses `ValueError`. + """ + + def __init__(self, message="Invalid resource origin."): + super().__init__(message) + + +class ImageCategory(str, Enum, metaclass=MetaEnum): + """The category of an image. + + - GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose. + - MASK: The image is a mask image. + - CONTROL: The image is a ControlNet control image. + - USER: The image is a user-provide image. + - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes. 
+ """ + + GENERAL = "general" + """GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose.""" + MASK = "mask" + """MASK: The image is a mask image.""" + CONTROL = "control" + """CONTROL: The image is a ControlNet control image.""" + USER = "user" + """USER: The image is a user-provide image.""" + OTHER = "other" + """OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes.""" + + +class InvalidImageCategoryException(ValueError): + """Raised when a provided value is not a valid ImageCategory. + + Subclasses `ValueError`. + """ + + def __init__(self, message="Invalid image category."): + super().__init__(message) + + +class ImageRecordNotFoundException(Exception): + """Raised when an image record is not found.""" + + def __init__(self, message="Image record not found"): + super().__init__(message) + + +class ImageRecordSaveException(Exception): + """Raised when an image record cannot be saved.""" + + def __init__(self, message="Image record not saved"): + super().__init__(message) + + +class ImageRecordDeleteException(Exception): + """Raised when an image record cannot be deleted.""" + + def __init__(self, message="Image record not deleted"): + super().__init__(message) + + +IMAGE_DTO_COLS = ", ".join( + list( + map( + lambda c: "images." 
+ c, + [ + "image_name", + "image_origin", + "image_category", + "width", + "height", + "session_id", + "node_id", + "is_intermediate", + "created_at", + "updated_at", + "deleted_at", + "starred", + ], + ) + ) +) + + class ImageRecord(BaseModelExcludeNull): """Deserialized image record without metadata.""" @@ -66,41 +170,6 @@ class ImageRecordChanges(BaseModelExcludeNull, extra=Extra.forbid): """The image's new `starred` state.""" -class ImageUrlsDTO(BaseModelExcludeNull): - """The URLs for an image and its thumbnail.""" - - image_name: str = Field(description="The unique name of the image.") - """The unique name of the image.""" - image_url: str = Field(description="The URL of the image.") - """The URL of the image.""" - thumbnail_url: str = Field(description="The URL of the image's thumbnail.") - """The URL of the image's thumbnail.""" - - -class ImageDTO(ImageRecord, ImageUrlsDTO): - """Deserialized image record, enriched for the frontend.""" - - board_id: Optional[str] = Field(description="The id of the board the image belongs to, if one exists.") - """The id of the board the image belongs to, if one exists.""" - - pass - - -def image_record_to_dto( - image_record: ImageRecord, - image_url: str, - thumbnail_url: str, - board_id: Optional[str], -) -> ImageDTO: - """Converts an image record to an image DTO.""" - return ImageDTO( - **image_record.dict(), - image_url=image_url, - thumbnail_url=thumbnail_url, - board_id=board_id, - ) - - def deserialize_image_record(image_dict: dict) -> ImageRecord: """Deserializes an image record.""" diff --git a/invokeai/app/services/image_record_storage.py b/invokeai/app/services/image_records/image_records_sqlite.py similarity index 81% rename from invokeai/app/services/image_record_storage.py rename to invokeai/app/services/image_records/image_records_sqlite.py index 509dd03d22..e50138a1c4 100644 --- a/invokeai/app/services/image_record_storage.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -1,138 +1,26 
@@ import json import sqlite3 import threading -from abc import ABC, abstractmethod from datetime import datetime from typing import Optional, cast -from invokeai.app.models.image import ImageCategory, ResourceOrigin -from invokeai.app.services.models.image_record import ImageRecord, ImageRecordChanges, deserialize_image_record -from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite import SqliteDatabase - -# TODO: Should these excpetions subclass existing python exceptions? -class ImageRecordNotFoundException(Exception): - """Raised when an image record is not found.""" - - def __init__(self, message="Image record not found"): - super().__init__(message) - - -class ImageRecordSaveException(Exception): - """Raised when an image record cannot be saved.""" - - def __init__(self, message="Image record not saved"): - super().__init__(message) - - -class ImageRecordDeleteException(Exception): - """Raised when an image record cannot be deleted.""" - - def __init__(self, message="Image record not deleted"): - super().__init__(message) - - -IMAGE_DTO_COLS = ", ".join( - list( - map( - lambda c: "images." 
+ c, - [ - "image_name", - "image_origin", - "image_category", - "width", - "height", - "session_id", - "node_id", - "is_intermediate", - "created_at", - "updated_at", - "deleted_at", - "starred", - ], - ) - ) +from .image_records_base import ImageRecordStorageBase +from .image_records_common import ( + IMAGE_DTO_COLS, + ImageCategory, + ImageRecord, + ImageRecordChanges, + ImageRecordDeleteException, + ImageRecordNotFoundException, + ImageRecordSaveException, + ResourceOrigin, + deserialize_image_record, ) -class ImageRecordStorageBase(ABC): - """Low-level service responsible for interfacing with the image record store.""" - - # TODO: Implement an `update()` method - - @abstractmethod - def get(self, image_name: str) -> ImageRecord: - """Gets an image record.""" - pass - - @abstractmethod - def get_metadata(self, image_name: str) -> Optional[dict]: - """Gets an image's metadata'.""" - pass - - @abstractmethod - def update( - self, - image_name: str, - changes: ImageRecordChanges, - ) -> None: - """Updates an image record.""" - pass - - @abstractmethod - def get_many( - self, - offset: Optional[int] = None, - limit: Optional[int] = None, - image_origin: Optional[ResourceOrigin] = None, - categories: Optional[list[ImageCategory]] = None, - is_intermediate: Optional[bool] = None, - board_id: Optional[str] = None, - ) -> OffsetPaginatedResults[ImageRecord]: - """Gets a page of image records.""" - pass - - # TODO: The database has a nullable `deleted_at` column, currently unused. - # Should we implement soft deletes? Would need coordination with ImageFileStorage. 
- @abstractmethod - def delete(self, image_name: str) -> None: - """Deletes an image record.""" - pass - - @abstractmethod - def delete_many(self, image_names: list[str]) -> None: - """Deletes many image records.""" - pass - - @abstractmethod - def delete_intermediates(self) -> list[str]: - """Deletes all intermediate image records, returning a list of deleted image names.""" - pass - - @abstractmethod - def save( - self, - image_name: str, - image_origin: ResourceOrigin, - image_category: ImageCategory, - width: int, - height: int, - session_id: Optional[str], - node_id: Optional[str], - metadata: Optional[dict], - is_intermediate: bool = False, - starred: bool = False, - ) -> datetime: - """Saves an image record.""" - pass - - @abstractmethod - def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]: - """Gets the most recent image for a board.""" - pass - - class SqliteImageRecordStorage(ImageRecordStorageBase): _conn: sqlite3.Connection _cursor: sqlite3.Cursor diff --git a/invokeai/app/services/images/__init__.py b/invokeai/app/services/images/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py new file mode 100644 index 0000000000..71581099a3 --- /dev/null +++ b/invokeai/app/services/images/images_base.py @@ -0,0 +1,129 @@ +from abc import ABC, abstractmethod +from typing import Callable, Optional + +from PIL.Image import Image as PILImageType + +from invokeai.app.invocations.metadata import ImageMetadata +from invokeai.app.services.image_records.image_records_common import ( + ImageCategory, + ImageRecord, + ImageRecordChanges, + ResourceOrigin, +) +from invokeai.app.services.images.images_common import ImageDTO +from invokeai.app.services.shared.pagination import OffsetPaginatedResults + + +class ImageServiceABC(ABC): + """High-level service for image management.""" + + _on_changed_callbacks: list[Callable[[ImageDTO], 
None]] + _on_deleted_callbacks: list[Callable[[str], None]] + + def __init__(self) -> None: + self._on_changed_callbacks = list() + self._on_deleted_callbacks = list() + + def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None: + """Register a callback for when an image is changed""" + self._on_changed_callbacks.append(on_changed) + + def on_deleted(self, on_deleted: Callable[[str], None]) -> None: + """Register a callback for when an image is deleted""" + self._on_deleted_callbacks.append(on_deleted) + + def _on_changed(self, item: ImageDTO) -> None: + for callback in self._on_changed_callbacks: + callback(item) + + def _on_deleted(self, item_id: str) -> None: + for callback in self._on_deleted_callbacks: + callback(item_id) + + @abstractmethod + def create( + self, + image: PILImageType, + image_origin: ResourceOrigin, + image_category: ImageCategory, + node_id: Optional[str] = None, + session_id: Optional[str] = None, + board_id: Optional[str] = None, + is_intermediate: bool = False, + metadata: Optional[dict] = None, + workflow: Optional[str] = None, + ) -> ImageDTO: + """Creates an image, storing the file and its metadata.""" + pass + + @abstractmethod + def update( + self, + image_name: str, + changes: ImageRecordChanges, + ) -> ImageDTO: + """Updates an image.""" + pass + + @abstractmethod + def get_pil_image(self, image_name: str) -> PILImageType: + """Gets an image as a PIL image.""" + pass + + @abstractmethod + def get_record(self, image_name: str) -> ImageRecord: + """Gets an image record.""" + pass + + @abstractmethod + def get_dto(self, image_name: str) -> ImageDTO: + """Gets an image DTO.""" + pass + + @abstractmethod + def get_metadata(self, image_name: str) -> ImageMetadata: + """Gets an image's metadata.""" + pass + + @abstractmethod + def get_path(self, image_name: str, thumbnail: bool = False) -> str: + """Gets an image's path.""" + pass + + @abstractmethod + def validate_path(self, path: str) -> bool: + """Validates an image's 
path.""" + pass + + @abstractmethod + def get_url(self, image_name: str, thumbnail: bool = False) -> str: + """Gets an image's or thumbnail's URL.""" + pass + + @abstractmethod + def get_many( + self, + offset: int = 0, + limit: int = 10, + image_origin: Optional[ResourceOrigin] = None, + categories: Optional[list[ImageCategory]] = None, + is_intermediate: Optional[bool] = None, + board_id: Optional[str] = None, + ) -> OffsetPaginatedResults[ImageDTO]: + """Gets a paginated list of image DTOs.""" + pass + + @abstractmethod + def delete(self, image_name: str): + """Deletes an image.""" + pass + + @abstractmethod + def delete_intermediates(self) -> int: + """Deletes all intermediate images.""" + pass + + @abstractmethod + def delete_images_on_board(self, board_id: str): + """Deletes all images on a board.""" + pass diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py new file mode 100644 index 0000000000..f8b63a16c1 --- /dev/null +++ b/invokeai/app/services/images/images_common.py @@ -0,0 +1,41 @@ +from typing import Optional + +from pydantic import Field + +from invokeai.app.services.image_records.image_records_common import ImageRecord +from invokeai.app.util.model_exclude_null import BaseModelExcludeNull + + +class ImageUrlsDTO(BaseModelExcludeNull): + """The URLs for an image and its thumbnail.""" + + image_name: str = Field(description="The unique name of the image.") + """The unique name of the image.""" + image_url: str = Field(description="The URL of the image.") + """The URL of the image.""" + thumbnail_url: str = Field(description="The URL of the image's thumbnail.") + """The URL of the image's thumbnail.""" + + +class ImageDTO(ImageRecord, ImageUrlsDTO): + """Deserialized image record, enriched for the frontend.""" + + board_id: Optional[str] = Field(description="The id of the board the image belongs to, if one exists.") + """The id of the board the image belongs to, if one exists.""" + + pass + + +def 
image_record_to_dto( + image_record: ImageRecord, + image_url: str, + thumbnail_url: str, + board_id: Optional[str], +) -> ImageDTO: + """Converts an image record to an image DTO.""" + return ImageDTO( + **image_record.dict(), + image_url=image_url, + thumbnail_url=thumbnail_url, + board_id=board_id, + ) diff --git a/invokeai/app/services/images.py b/invokeai/app/services/images/images_default.py similarity index 74% rename from invokeai/app/services/images.py rename to invokeai/app/services/images/images_default.py index d68d5479f4..9134b9a4f6 100644 --- a/invokeai/app/services/images.py +++ b/invokeai/app/services/images/images_default.py @@ -1,144 +1,30 @@ -from abc import ABC, abstractmethod -from typing import Callable, Optional +from typing import Optional from PIL.Image import Image as PILImageType from invokeai.app.invocations.metadata import ImageMetadata -from invokeai.app.models.image import ( - ImageCategory, - InvalidImageCategoryException, - InvalidOriginException, - ResourceOrigin, -) -from invokeai.app.services.image_file_storage import ( +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.util.metadata import get_metadata_graph_from_raw_session + +from ..image_files.image_files_common import ( ImageFileDeleteException, ImageFileNotFoundException, ImageFileSaveException, ) -from invokeai.app.services.image_record_storage import ( +from ..image_records.image_records_common import ( + ImageCategory, + ImageRecord, + ImageRecordChanges, ImageRecordDeleteException, ImageRecordNotFoundException, ImageRecordSaveException, + InvalidImageCategoryException, + InvalidOriginException, + ResourceOrigin, ) -from invokeai.app.services.invoker import Invoker -from invokeai.app.services.models.image_record import ImageDTO, ImageRecord, ImageRecordChanges, image_record_to_dto -from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from 
invokeai.app.util.metadata import get_metadata_graph_from_raw_session - - -class ImageServiceABC(ABC): - """High-level service for image management.""" - - _on_changed_callbacks: list[Callable[[ImageDTO], None]] - _on_deleted_callbacks: list[Callable[[str], None]] - - def __init__(self) -> None: - self._on_changed_callbacks = list() - self._on_deleted_callbacks = list() - - def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None: - """Register a callback for when an image is changed""" - self._on_changed_callbacks.append(on_changed) - - def on_deleted(self, on_deleted: Callable[[str], None]) -> None: - """Register a callback for when an image is deleted""" - self._on_deleted_callbacks.append(on_deleted) - - def _on_changed(self, item: ImageDTO) -> None: - for callback in self._on_changed_callbacks: - callback(item) - - def _on_deleted(self, item_id: str) -> None: - for callback in self._on_deleted_callbacks: - callback(item_id) - - @abstractmethod - def create( - self, - image: PILImageType, - image_origin: ResourceOrigin, - image_category: ImageCategory, - node_id: Optional[str] = None, - session_id: Optional[str] = None, - board_id: Optional[str] = None, - is_intermediate: bool = False, - metadata: Optional[dict] = None, - workflow: Optional[str] = None, - ) -> ImageDTO: - """Creates an image, storing the file and its metadata.""" - pass - - @abstractmethod - def update( - self, - image_name: str, - changes: ImageRecordChanges, - ) -> ImageDTO: - """Updates an image.""" - pass - - @abstractmethod - def get_pil_image(self, image_name: str) -> PILImageType: - """Gets an image as a PIL image.""" - pass - - @abstractmethod - def get_record(self, image_name: str) -> ImageRecord: - """Gets an image record.""" - pass - - @abstractmethod - def get_dto(self, image_name: str) -> ImageDTO: - """Gets an image DTO.""" - pass - - @abstractmethod - def get_metadata(self, image_name: str) -> ImageMetadata: - """Gets an image's metadata.""" - pass - - @abstractmethod 
- def get_path(self, image_name: str, thumbnail: bool = False) -> str: - """Gets an image's path.""" - pass - - @abstractmethod - def validate_path(self, path: str) -> bool: - """Validates an image's path.""" - pass - - @abstractmethod - def get_url(self, image_name: str, thumbnail: bool = False) -> str: - """Gets an image's or thumbnail's URL.""" - pass - - @abstractmethod - def get_many( - self, - offset: int = 0, - limit: int = 10, - image_origin: Optional[ResourceOrigin] = None, - categories: Optional[list[ImageCategory]] = None, - is_intermediate: Optional[bool] = None, - board_id: Optional[str] = None, - ) -> OffsetPaginatedResults[ImageDTO]: - """Gets a paginated list of image DTOs.""" - pass - - @abstractmethod - def delete(self, image_name: str): - """Deletes an image.""" - pass - - @abstractmethod - def delete_intermediates(self) -> int: - """Deletes all intermediate images.""" - pass - - @abstractmethod - def delete_images_on_board(self, board_id: str): - """Deletes all images on a board.""" - pass +from .images_base import ImageServiceABC +from .images_common import ImageDTO, image_record_to_dto class ImageService(ImageServiceABC): diff --git a/invokeai/app/services/invocation_processor/__init__.py b/invokeai/app/services/invocation_processor/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/invocation_processor/invocation_processor_base.py b/invokeai/app/services/invocation_processor/invocation_processor_base.py new file mode 100644 index 0000000000..04774accc2 --- /dev/null +++ b/invokeai/app/services/invocation_processor/invocation_processor_base.py @@ -0,0 +1,5 @@ +from abc import ABC + + +class InvocationProcessorABC(ABC): + pass diff --git a/invokeai/app/services/invocation_processor/invocation_processor_common.py b/invokeai/app/services/invocation_processor/invocation_processor_common.py new file mode 100644 index 0000000000..347f6c7323 --- /dev/null +++ 
b/invokeai/app/services/invocation_processor/invocation_processor_common.py @@ -0,0 +1,15 @@ +from pydantic import BaseModel, Field + + +class ProgressImage(BaseModel): + """The progress image sent intermittently during processing""" + + width: int = Field(description="The effective width of the image in pixels") + height: int = Field(description="The effective height of the image in pixels") + dataURL: str = Field(description="The image data as a b64 data URL") + + +class CanceledException(Exception): + """Execution canceled by user.""" + + pass diff --git a/invokeai/app/services/processor.py b/invokeai/app/services/invocation_processor/invocation_processor_default.py similarity index 96% rename from invokeai/app/services/processor.py rename to invokeai/app/services/invocation_processor/invocation_processor_default.py index 226920bdaf..349c4a03e4 100644 --- a/invokeai/app/services/processor.py +++ b/invokeai/app/services/invocation_processor/invocation_processor_default.py @@ -4,11 +4,12 @@ from threading import BoundedSemaphore, Event, Thread from typing import Optional import invokeai.backend.util.logging as logger +from invokeai.app.invocations.baseinvocation import InvocationContext +from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem -from ..invocations.baseinvocation import InvocationContext -from ..models.exceptions import CanceledException -from .invocation_queue import InvocationQueueItem -from .invoker import InvocationProcessorABC, Invoker +from ..invoker import Invoker +from .invocation_processor_base import InvocationProcessorABC +from .invocation_processor_common import CanceledException class DefaultInvocationProcessor(InvocationProcessorABC): diff --git a/invokeai/app/services/invocation_queue/__init__.py b/invokeai/app/services/invocation_queue/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/invocation_queue/invocation_queue_base.py 
b/invokeai/app/services/invocation_queue/invocation_queue_base.py new file mode 100644 index 0000000000..09f4875c5f --- /dev/null +++ b/invokeai/app/services/invocation_queue/invocation_queue_base.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) + +from abc import ABC, abstractmethod +from typing import Optional + +from .invocation_queue_common import InvocationQueueItem + + +class InvocationQueueABC(ABC): + """Abstract base class for all invocation queues""" + + @abstractmethod + def get(self) -> InvocationQueueItem: + pass + + @abstractmethod + def put(self, item: Optional[InvocationQueueItem]) -> None: + pass + + @abstractmethod + def cancel(self, graph_execution_state_id: str) -> None: + pass + + @abstractmethod + def is_canceled(self, graph_execution_state_id: str) -> bool: + pass diff --git a/invokeai/app/services/invocation_queue/invocation_queue_common.py b/invokeai/app/services/invocation_queue/invocation_queue_common.py new file mode 100644 index 0000000000..88e72886f7 --- /dev/null +++ b/invokeai/app/services/invocation_queue/invocation_queue_common.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) + +import time + +from pydantic import BaseModel, Field + + +class InvocationQueueItem(BaseModel): + graph_execution_state_id: str = Field(description="The ID of the graph execution state") + invocation_id: str = Field(description="The ID of the node being invoked") + session_queue_id: str = Field(description="The ID of the session queue from which this invocation queue item came") + session_queue_item_id: int = Field( + description="The ID of session queue item from which this invocation queue item came" + ) + session_queue_batch_id: str = Field( + description="The ID of the session batch from which this invocation queue item came" + ) + invoke_all: bool = Field(default=False) + timestamp: float = Field(default_factory=time.time) diff --git 
a/invokeai/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue/invocation_queue_memory.py similarity index 52% rename from invokeai/app/services/invocation_queue.py rename to invokeai/app/services/invocation_queue/invocation_queue_memory.py index 378a9d12cf..33e82fae18 100644 --- a/invokeai/app/services/invocation_queue.py +++ b/invokeai/app/services/invocation_queue/invocation_queue_memory.py @@ -1,45 +1,11 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) import time -from abc import ABC, abstractmethod from queue import Queue from typing import Optional -from pydantic import BaseModel, Field - - -class InvocationQueueItem(BaseModel): - graph_execution_state_id: str = Field(description="The ID of the graph execution state") - invocation_id: str = Field(description="The ID of the node being invoked") - session_queue_id: str = Field(description="The ID of the session queue from which this invocation queue item came") - session_queue_item_id: int = Field( - description="The ID of session queue item from which this invocation queue item came" - ) - session_queue_batch_id: str = Field( - description="The ID of the session batch from which this invocation queue item came" - ) - invoke_all: bool = Field(default=False) - timestamp: float = Field(default_factory=time.time) - - -class InvocationQueueABC(ABC): - """Abstract base class for all invocation queues""" - - @abstractmethod - def get(self) -> InvocationQueueItem: - pass - - @abstractmethod - def put(self, item: Optional[InvocationQueueItem]) -> None: - pass - - @abstractmethod - def cancel(self, graph_execution_state_id: str) -> None: - pass - - @abstractmethod - def is_canceled(self, graph_execution_state_id: str) -> bool: - pass +from .invocation_queue_base import InvocationQueueABC +from .invocation_queue_common import InvocationQueueItem class MemoryInvocationQueue(InvocationQueueABC): diff --git a/invokeai/app/services/invocation_services.py 
b/invokeai/app/services/invocation_services.py index 09a5df0cd9..ba53ea50cf 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -6,27 +6,27 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: from logging import Logger - from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase - from invokeai.app.services.board_images import BoardImagesServiceABC - from invokeai.app.services.board_record_storage import BoardRecordStorageBase - from invokeai.app.services.boards import BoardServiceABC - from invokeai.app.services.config import InvokeAIAppConfig - from invokeai.app.services.events import EventServiceBase - from invokeai.app.services.graph import GraphExecutionState, LibraryGraph - from invokeai.app.services.image_file_storage import ImageFileStorageBase - from invokeai.app.services.image_record_storage import ImageRecordStorageBase - from invokeai.app.services.images import ImageServiceABC - from invokeai.app.services.invocation_cache.invocation_cache_base import InvocationCacheBase - from invokeai.app.services.invocation_queue import InvocationQueueABC - from invokeai.app.services.invocation_stats import InvocationStatsServiceBase - from invokeai.app.services.invoker import InvocationProcessorABC - from invokeai.app.services.item_storage import ItemStorageABC - from invokeai.app.services.latent_storage import LatentsStorageBase - from invokeai.app.services.model_manager_service import ModelManagerServiceBase - from invokeai.app.services.resource_name import NameServiceBase - from invokeai.app.services.session_processor.session_processor_base import SessionProcessorBase - from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase - from invokeai.app.services.urls import UrlServiceBase + from .board_image_records.board_image_records_base import BoardImageRecordStorageBase + from .board_images.board_images_base import BoardImagesServiceABC + from 
.board_records.board_records_base import BoardRecordStorageBase + from .boards.boards_base import BoardServiceABC + from .config import InvokeAIAppConfig + from .events.events_base import EventServiceBase + from .image_files.image_files_base import ImageFileStorageBase + from .image_records.image_records_base import ImageRecordStorageBase + from .images.images_base import ImageServiceABC + from .invocation_cache.invocation_cache_base import InvocationCacheBase + from .invocation_processor.invocation_processor_base import InvocationProcessorABC + from .invocation_queue.invocation_queue_base import InvocationQueueABC + from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase + from .item_storage.item_storage_base import ItemStorageABC + from .latents_storage.latents_storage_base import LatentsStorageBase + from .model_manager.model_manager_base import ModelManagerServiceBase + from .names.names_base import NameServiceBase + from .session_processor.session_processor_base import SessionProcessorBase + from .session_queue.session_queue_base import SessionQueueBase + from .shared.graph import GraphExecutionState, LibraryGraph + from .urls.urls_base import UrlServiceBase class InvocationServices: diff --git a/invokeai/app/services/invocation_stats/__init__.py b/invokeai/app/services/invocation_stats/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/invocation_stats/invocation_stats_base.py b/invokeai/app/services/invocation_stats/invocation_stats_base.py new file mode 100644 index 0000000000..7db653c3fb --- /dev/null +++ b/invokeai/app/services/invocation_stats/invocation_stats_base.py @@ -0,0 +1,121 @@ +# Copyright 2023 Lincoln D. Stein +"""Utility to collect execution time and GPU usage stats on invocations in flight + +Usage: + +statistics = InvocationStatsService(graph_execution_manager) +with statistics.collect_stats(invocation, graph_execution_state.id): + ... execute graphs... 
+statistics.log_stats() + +Typical output: +[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> Graph stats: c7764585-9c68-4d9d-a199-55e8186790f3 +[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> Node Calls Seconds VRAM Used +[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> main_model_loader 1 0.005s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> clip_skip 1 0.004s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> compel 2 0.512s 0.26G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> rand_int 1 0.001s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> range_of_size 1 0.001s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> iterate 1 0.001s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> metadata_accumulator 1 0.002s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> noise 1 0.002s 0.01G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> t2l 1 3.541s 1.93G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> l2i 1 0.679s 0.58G +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> TOTAL GRAPH EXECUTION TIME: 4.749s +[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> Current VRAM utilization 0.01G + +The abstract base class for this class is InvocationStatsServiceBase. An implementing class which +writes to the system log is stored in InvocationServices.performance_statistics. 
+""" + +from abc import ABC, abstractmethod +from contextlib import AbstractContextManager +from typing import Dict + +from invokeai.app.invocations.baseinvocation import BaseInvocation +from invokeai.backend.model_management.model_cache import CacheStats + +from .invocation_stats_common import NodeLog + + +class InvocationStatsServiceBase(ABC): + "Abstract base class for recording node memory/time performance statistics" + + # {graph_id => NodeLog} + _stats: Dict[str, NodeLog] + _cache_stats: Dict[str, CacheStats] + ram_used: float + ram_changed: float + + @abstractmethod + def __init__(self): + """ + Initialize the InvocationStatsService and reset counters to zero + """ + pass + + @abstractmethod + def collect_stats( + self, + invocation: BaseInvocation, + graph_execution_state_id: str, + ) -> AbstractContextManager: + """ + Return a context object that will capture the statistics on the execution + of invocaation. Use with: to place around the part of the code that executes the invocation. + :param invocation: BaseInvocation object from the current graph. + :param graph_execution_state_id: The id of the current session. + """ + pass + + @abstractmethod + def reset_stats(self, graph_execution_state_id: str): + """ + Reset all statistics for the indicated graph + :param graph_execution_state_id + """ + pass + + @abstractmethod + def reset_all_stats(self): + """Zero all statistics""" + pass + + @abstractmethod + def update_invocation_stats( + self, + graph_id: str, + invocation_type: str, + time_used: float, + vram_used: float, + ): + """ + Add timing information on execution of a node. Usually + used internally. 
+ :param graph_id: ID of the graph that is currently executing + :param invocation_type: String literal type of the node + :param time_used: Time used by node's execution (sec) + :param vram_used: Maximum VRAM used during execution (GB) + """ + pass + + @abstractmethod + def log_stats(self): + """ + Write out the accumulated statistics to the log or somewhere else. + """ + pass + + @abstractmethod + def update_mem_stats( + self, + ram_used: float, + ram_changed: float, + ): + """ + Update the collector with RAM memory usage info. + + :param ram_used: How much RAM is currently in use. + :param ram_changed: How much RAM changed since last generation. + """ + pass diff --git a/invokeai/app/services/invocation_stats/invocation_stats_common.py b/invokeai/app/services/invocation_stats/invocation_stats_common.py new file mode 100644 index 0000000000..19b954f6da --- /dev/null +++ b/invokeai/app/services/invocation_stats/invocation_stats_common.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass, field +from typing import Dict + +# size of GIG in bytes +GIG = 1073741824 + + +@dataclass +class NodeStats: + """Class for tracking execution stats of an invocation node""" + + calls: int = 0 + time_used: float = 0.0 # seconds + max_vram: float = 0.0 # GB + cache_hits: int = 0 + cache_misses: int = 0 + cache_high_watermark: int = 0 + + +@dataclass +class NodeLog: + """Class for tracking node usage""" + + # {node_type => NodeStats} + nodes: Dict[str, NodeStats] = field(default_factory=dict) diff --git a/invokeai/app/services/invocation_stats.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py similarity index 56% rename from invokeai/app/services/invocation_stats.py rename to invokeai/app/services/invocation_stats/invocation_stats_default.py index 6799031eff..2041ab6190 100644 --- a/invokeai/app/services/invocation_stats.py +++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py @@ -1,154 +1,17 @@ -# Copyright 2023 Lincoln D. 
Stein -"""Utility to collect execution time and GPU usage stats on invocations in flight - -Usage: - -statistics = InvocationStatsService(graph_execution_manager) -with statistics.collect_stats(invocation, graph_execution_state.id): - ... execute graphs... -statistics.log_stats() - -Typical output: -[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> Graph stats: c7764585-9c68-4d9d-a199-55e8186790f3 -[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> Node Calls Seconds VRAM Used -[2023-08-02 18:03:04,507]::[InvokeAI]::INFO --> main_model_loader 1 0.005s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> clip_skip 1 0.004s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> compel 2 0.512s 0.26G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> rand_int 1 0.001s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> range_of_size 1 0.001s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> iterate 1 0.001s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> metadata_accumulator 1 0.002s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> noise 1 0.002s 0.01G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> t2l 1 3.541s 1.93G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> l2i 1 0.679s 0.58G -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> TOTAL GRAPH EXECUTION TIME: 4.749s -[2023-08-02 18:03:04,508]::[InvokeAI]::INFO --> Current VRAM utilization 0.01G - -The abstract base class for this class is InvocationStatsServiceBase. An implementing class which -writes to the system log is stored in InvocationServices.performance_statistics. 
-""" - import time -from abc import ABC, abstractmethod -from contextlib import AbstractContextManager -from dataclasses import dataclass, field from typing import Dict import psutil import torch import invokeai.backend.util.logging as logger +from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.invoker import Invoker +from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase from invokeai.backend.model_management.model_cache import CacheStats -from ..invocations.baseinvocation import BaseInvocation -from .model_manager_service import ModelManagerServiceBase - -# size of GIG in bytes -GIG = 1073741824 - - -@dataclass -class NodeStats: - """Class for tracking execution stats of an invocation node""" - - calls: int = 0 - time_used: float = 0.0 # seconds - max_vram: float = 0.0 # GB - cache_hits: int = 0 - cache_misses: int = 0 - cache_high_watermark: int = 0 - - -@dataclass -class NodeLog: - """Class for tracking node usage""" - - # {node_type => NodeStats} - nodes: Dict[str, NodeStats] = field(default_factory=dict) - - -class InvocationStatsServiceBase(ABC): - "Abstract base class for recording node memory/time performance statistics" - - # {graph_id => NodeLog} - _stats: Dict[str, NodeLog] - _cache_stats: Dict[str, CacheStats] - ram_used: float - ram_changed: float - - @abstractmethod - def __init__(self): - """ - Initialize the InvocationStatsService and reset counters to zero - """ - pass - - @abstractmethod - def collect_stats( - self, - invocation: BaseInvocation, - graph_execution_state_id: str, - ) -> AbstractContextManager: - """ - Return a context object that will capture the statistics on the execution - of invocaation. Use with: to place around the part of the code that executes the invocation. - :param invocation: BaseInvocation object from the current graph. - :param graph_execution_state: GraphExecutionState object from the current session. 
- """ - pass - - @abstractmethod - def reset_stats(self, graph_execution_state_id: str): - """ - Reset all statistics for the indicated graph - :param graph_execution_state_id - """ - pass - - @abstractmethod - def reset_all_stats(self): - """Zero all statistics""" - pass - - @abstractmethod - def update_invocation_stats( - self, - graph_id: str, - invocation_type: str, - time_used: float, - vram_used: float, - ): - """ - Add timing information on execution of a node. Usually - used internally. - :param graph_id: ID of the graph that is currently executing - :param invocation_type: String literal type of the node - :param time_used: Time used by node's exection (sec) - :param vram_used: Maximum VRAM used during exection (GB) - """ - pass - - @abstractmethod - def log_stats(self): - """ - Write out the accumulated statistics to the log or somewhere else. - """ - pass - - @abstractmethod - def update_mem_stats( - self, - ram_used: float, - ram_changed: float, - ): - """ - Update the collector with RAM memory usage info. - - :param ram_used: How much RAM is currently in use. - :param ram_changed: How much RAM changed since last generation. 
- """ - pass +from .invocation_stats_base import InvocationStatsServiceBase +from .invocation_stats_common import GIG, NodeLog, NodeStats class InvocationStatsService(InvocationStatsServiceBase): diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py index 0c98fc285c..134bec2693 100644 --- a/invokeai/app/services/invoker.py +++ b/invokeai/app/services/invoker.py @@ -1,11 +1,10 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from abc import ABC from typing import Optional -from .graph import Graph, GraphExecutionState -from .invocation_queue import InvocationQueueItem +from .invocation_queue.invocation_queue_common import InvocationQueueItem from .invocation_services import InvocationServices +from .shared.graph import Graph, GraphExecutionState class Invoker: @@ -84,7 +83,3 @@ class Invoker: self.__stop_service(getattr(self.services, service)) self.services.queue.put(None) - - -class InvocationProcessorABC(ABC): - pass diff --git a/invokeai/app/services/item_storage/__init__.py b/invokeai/app/services/item_storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/item_storage.py b/invokeai/app/services/item_storage/item_storage_base.py similarity index 95% rename from invokeai/app/services/item_storage.py rename to invokeai/app/services/item_storage/item_storage_base.py index 290035b086..1446c0cd08 100644 --- a/invokeai/app/services/item_storage.py +++ b/invokeai/app/services/item_storage/item_storage_base.py @@ -9,6 +9,8 @@ T = TypeVar("T", bound=BaseModel) class ItemStorageABC(ABC, Generic[T]): + """Provides storage for a single type of item. 
The type must be a Pydantic model.""" + _on_changed_callbacks: list[Callable[[T], None]] _on_deleted_callbacks: list[Callable[[str], None]] diff --git a/invokeai/app/services/sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py similarity index 95% rename from invokeai/app/services/sqlite.py rename to invokeai/app/services/item_storage/item_storage_sqlite.py index eae714a795..b810baf9fd 100644 --- a/invokeai/app/services/sqlite.py +++ b/invokeai/app/services/item_storage/item_storage_sqlite.py @@ -4,15 +4,13 @@ from typing import Generic, Optional, TypeVar, get_args from pydantic import BaseModel, parse_raw_as -from invokeai.app.services.shared.sqlite import SqliteDatabase from invokeai.app.services.shared.pagination import PaginatedResults +from invokeai.app.services.shared.sqlite import SqliteDatabase -from .item_storage import ItemStorageABC +from .item_storage_base import ItemStorageABC T = TypeVar("T", bound=BaseModel) -sqlite_memory = ":memory:" - class SqliteItemStorage(ItemStorageABC, Generic[T]): _table_name: str @@ -47,7 +45,8 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._lock.release() def _parse_item(self, item: str) -> T: - item_type = get_args(self.__orig_class__)[0] + # __orig_class__ is technically an implementation detail of the typing module, not a supported API + item_type = get_args(self.__orig_class__)[0] # type: ignore return parse_raw_as(item_type, item) def set(self, item: T): diff --git a/invokeai/app/services/latent_storage.py b/invokeai/app/services/latent_storage.py deleted file mode 100644 index 8605ef5abd..0000000000 --- a/invokeai/app/services/latent_storage.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -from abc import ABC, abstractmethod -from pathlib import Path -from queue import Queue -from typing import Callable, Dict, Optional, Union - -import torch - - -class LatentsStorageBase(ABC): - """Responsible for storing and retrieving 
latents.""" - - _on_changed_callbacks: list[Callable[[torch.Tensor], None]] - _on_deleted_callbacks: list[Callable[[str], None]] - - def __init__(self) -> None: - self._on_changed_callbacks = list() - self._on_deleted_callbacks = list() - - @abstractmethod - def get(self, name: str) -> torch.Tensor: - pass - - @abstractmethod - def save(self, name: str, data: torch.Tensor) -> None: - pass - - @abstractmethod - def delete(self, name: str) -> None: - pass - - def on_changed(self, on_changed: Callable[[torch.Tensor], None]) -> None: - """Register a callback for when an item is changed""" - self._on_changed_callbacks.append(on_changed) - - def on_deleted(self, on_deleted: Callable[[str], None]) -> None: - """Register a callback for when an item is deleted""" - self._on_deleted_callbacks.append(on_deleted) - - def _on_changed(self, item: torch.Tensor) -> None: - for callback in self._on_changed_callbacks: - callback(item) - - def _on_deleted(self, item_id: str) -> None: - for callback in self._on_deleted_callbacks: - callback(item_id) - - -class ForwardCacheLatentsStorage(LatentsStorageBase): - """Caches the latest N latents in memory, writing-thorugh to and reading from underlying storage""" - - __cache: Dict[str, torch.Tensor] - __cache_ids: Queue - __max_cache_size: int - __underlying_storage: LatentsStorageBase - - def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20): - super().__init__() - self.__underlying_storage = underlying_storage - self.__cache = dict() - self.__cache_ids = Queue() - self.__max_cache_size = max_cache_size - - def get(self, name: str) -> torch.Tensor: - cache_item = self.__get_cache(name) - if cache_item is not None: - return cache_item - - latent = self.__underlying_storage.get(name) - self.__set_cache(name, latent) - return latent - - def save(self, name: str, data: torch.Tensor) -> None: - self.__underlying_storage.save(name, data) - self.__set_cache(name, data) - self._on_changed(data) - - def delete(self, 
name: str) -> None: - self.__underlying_storage.delete(name) - if name in self.__cache: - del self.__cache[name] - self._on_deleted(name) - - def __get_cache(self, name: str) -> Optional[torch.Tensor]: - return None if name not in self.__cache else self.__cache[name] - - def __set_cache(self, name: str, data: torch.Tensor): - if name not in self.__cache: - self.__cache[name] = data - self.__cache_ids.put(name) - if self.__cache_ids.qsize() > self.__max_cache_size: - self.__cache.pop(self.__cache_ids.get()) - - -class DiskLatentsStorage(LatentsStorageBase): - """Stores latents in a folder on disk without caching""" - - __output_folder: Union[str, Path] - - def __init__(self, output_folder: Union[str, Path]): - self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder) - self.__output_folder.mkdir(parents=True, exist_ok=True) - - def get(self, name: str) -> torch.Tensor: - latent_path = self.get_path(name) - return torch.load(latent_path) - - def save(self, name: str, data: torch.Tensor) -> None: - self.__output_folder.mkdir(parents=True, exist_ok=True) - latent_path = self.get_path(name) - torch.save(data, latent_path) - - def delete(self, name: str) -> None: - latent_path = self.get_path(name) - latent_path.unlink() - - def get_path(self, name: str) -> Path: - return self.__output_folder / name diff --git a/invokeai/app/services/latents_storage/__init__.py b/invokeai/app/services/latents_storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/latents_storage/latents_storage_base.py b/invokeai/app/services/latents_storage/latents_storage_base.py new file mode 100644 index 0000000000..4850a477d3 --- /dev/null +++ b/invokeai/app/services/latents_storage/latents_storage_base.py @@ -0,0 +1,45 @@ +# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) + +from abc import ABC, abstractmethod +from typing import Callable + +import torch + + +class LatentsStorageBase(ABC): + 
"""Responsible for storing and retrieving latents.""" + + _on_changed_callbacks: list[Callable[[torch.Tensor], None]] + _on_deleted_callbacks: list[Callable[[str], None]] + + def __init__(self) -> None: + self._on_changed_callbacks = list() + self._on_deleted_callbacks = list() + + @abstractmethod + def get(self, name: str) -> torch.Tensor: + pass + + @abstractmethod + def save(self, name: str, data: torch.Tensor) -> None: + pass + + @abstractmethod + def delete(self, name: str) -> None: + pass + + def on_changed(self, on_changed: Callable[[torch.Tensor], None]) -> None: + """Register a callback for when an item is changed""" + self._on_changed_callbacks.append(on_changed) + + def on_deleted(self, on_deleted: Callable[[str], None]) -> None: + """Register a callback for when an item is deleted""" + self._on_deleted_callbacks.append(on_deleted) + + def _on_changed(self, item: torch.Tensor) -> None: + for callback in self._on_changed_callbacks: + callback(item) + + def _on_deleted(self, item_id: str) -> None: + for callback in self._on_deleted_callbacks: + callback(item_id) diff --git a/invokeai/app/services/latents_storage/latents_storage_disk.py b/invokeai/app/services/latents_storage/latents_storage_disk.py new file mode 100644 index 0000000000..6e7010bae0 --- /dev/null +++ b/invokeai/app/services/latents_storage/latents_storage_disk.py @@ -0,0 +1,34 @@ +# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) + +from pathlib import Path +from typing import Union + +import torch + +from .latents_storage_base import LatentsStorageBase + + +class DiskLatentsStorage(LatentsStorageBase): + """Stores latents in a folder on disk without caching""" + + __output_folder: Path + + def __init__(self, output_folder: Union[str, Path]): + self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder) + self.__output_folder.mkdir(parents=True, exist_ok=True) + + def get(self, name: str) -> torch.Tensor: + latent_path = 
self.get_path(name) + return torch.load(latent_path) + + def save(self, name: str, data: torch.Tensor) -> None: + self.__output_folder.mkdir(parents=True, exist_ok=True) + latent_path = self.get_path(name) + torch.save(data, latent_path) + + def delete(self, name: str) -> None: + latent_path = self.get_path(name) + latent_path.unlink() + + def get_path(self, name: str) -> Path: + return self.__output_folder / name diff --git a/invokeai/app/services/latents_storage/latents_storage_forward_cache.py b/invokeai/app/services/latents_storage/latents_storage_forward_cache.py new file mode 100644 index 0000000000..5248362ff5 --- /dev/null +++ b/invokeai/app/services/latents_storage/latents_storage_forward_cache.py @@ -0,0 +1,54 @@ +# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) + +from queue import Queue +from typing import Dict, Optional + +import torch + +from .latents_storage_base import LatentsStorageBase + + +class ForwardCacheLatentsStorage(LatentsStorageBase): + """Caches the latest N latents in memory, writing-through to and reading from underlying storage""" + + __cache: Dict[str, torch.Tensor] + __cache_ids: Queue + __max_cache_size: int + __underlying_storage: LatentsStorageBase + + def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20): + super().__init__() + self.__underlying_storage = underlying_storage + self.__cache = dict() + self.__cache_ids = Queue() + self.__max_cache_size = max_cache_size + + def get(self, name: str) -> torch.Tensor: + cache_item = self.__get_cache(name) + if cache_item is not None: + return cache_item + + latent = self.__underlying_storage.get(name) + self.__set_cache(name, latent) + return latent + + def save(self, name: str, data: torch.Tensor) -> None: + self.__underlying_storage.save(name, data) + self.__set_cache(name, data) + self._on_changed(data) + + def delete(self, name: str) -> None: + self.__underlying_storage.delete(name) + if name in self.__cache: + del 
self.__cache[name] + self._on_deleted(name) + + def __get_cache(self, name: str) -> Optional[torch.Tensor]: + return None if name not in self.__cache else self.__cache[name] + + def __set_cache(self, name: str, data: torch.Tensor): + if name not in self.__cache: + self.__cache[name] = data + self.__cache_ids.put(name) + if self.__cache_ids.qsize() > self.__max_cache_size: + self.__cache.pop(self.__cache_ids.get()) diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py new file mode 100644 index 0000000000..bb9110ba0a --- /dev/null +++ b/invokeai/app/services/model_manager/model_manager_base.py @@ -0,0 +1,286 @@ +# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Team + +from __future__ import annotations + +from abc import ABC, abstractmethod +from logging import Logger +from pathlib import Path +from typing import TYPE_CHECKING, Callable, List, Literal, Optional, Tuple, Union + +from pydantic import Field + +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.backend.model_management import ( + AddModelResult, + BaseModelType, + MergeInterpolationMethod, + ModelInfo, + ModelType, + SchedulerPredictionType, + SubModelType, +) +from invokeai.backend.model_management.model_cache import CacheStats + +if TYPE_CHECKING: + from invokeai.app.invocations.baseinvocation import BaseInvocation, InvocationContext + + +class ModelManagerServiceBase(ABC): + """Responsible for managing models on disk and in memory""" + + @abstractmethod + def __init__( + self, + config: InvokeAIAppConfig, + logger: Logger, + ): + """ + Initialize with the path to the models.yaml config file. + Optional parameters are the torch device type, precision, max_models, + and sequential_offload boolean. 
Note that the default device + type and precision are set up for a CUDA system running at half precision. + """ + pass + + @abstractmethod + def get_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + submodel: Optional[SubModelType] = None, + node: Optional[BaseInvocation] = None, + context: Optional[InvocationContext] = None, + ) -> ModelInfo: + """Retrieve the indicated model with name and type. + submodel can be used to get a part (such as the vae) + of a diffusers pipeline.""" + pass + + @property + @abstractmethod + def logger(self): + pass + + @abstractmethod + def model_exists( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + ) -> bool: + pass + + @abstractmethod + def model_info(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: + """ + Given a model name returns a dict-like (OmegaConf) object describing it. + Uses the exact format as the omegaconf stanza. + """ + pass + + @abstractmethod + def list_models(self, base_model: Optional[BaseModelType] = None, model_type: Optional[ModelType] = None) -> dict: + """ + Return a dict of models in the format: + { model_type1: + { model_name1: {'status': 'active'|'cached'|'not loaded', + 'model_name' : name, + 'model_type' : SDModelType, + 'description': description, + 'format': 'folder'|'safetensors'|'ckpt' + }, + model_name2: { etc } + }, + model_type2: + { model_name_n: etc + } + """ + pass + + @abstractmethod + def list_model(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: + """ + Return information about the model using the same format as list_models() + """ + pass + + @abstractmethod + def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]: + """ + Returns a list of all the model names known. 
+ """ + pass + + @abstractmethod + def add_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + model_attributes: dict, + clobber: bool = False, + ) -> AddModelResult: + """ + Update the named model with a dictionary of attributes. Will fail with an + assertion error if the name already exists. Pass clobber=True to overwrite. + On a successful update, the config will be changed in memory. Will fail + with an assertion error if provided attributes are incorrect or + the model name is missing. Call commit() to write changes to disk. + """ + pass + + @abstractmethod + def update_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + model_attributes: dict, + ) -> AddModelResult: + """ + Update the named model with a dictionary of attributes. Will fail with a + ModelNotFoundException if the name does not already exist. + + On a successful update, the config will be changed in memory. Will fail + with an assertion error if provided attributes are incorrect or + the model name is missing. Call commit() to write changes to disk. + """ + pass + + @abstractmethod + def del_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + ): + """ + Delete the named model from configuration. If delete_files is true, + then the underlying weight file or diffusers directory will be deleted + as well. Call commit() to write to disk. + """ + pass + + @abstractmethod + def rename_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: ModelType, + new_name: str, + ): + """ + Rename the indicated model. + """ + pass + + @abstractmethod + def list_checkpoint_configs(self) -> List[Path]: + """ + List the checkpoint config paths from ROOT/configs/stable-diffusion. 
+ """ + pass + + @abstractmethod + def convert_model( + self, + model_name: str, + base_model: BaseModelType, + model_type: Literal[ModelType.Main, ModelType.Vae], + ) -> AddModelResult: + """ + Convert a checkpoint file into a diffusers folder, deleting the cached + version and deleting the original checkpoint file if it is in the models + directory. + :param model_name: Name of the model to convert + :param base_model: Base model type + :param model_type: Type of model ['vae' or 'main'] + + This will raise a ValueError unless the model is not a checkpoint. It will + also raise a ValueError in the event that there is a similarly-named diffusers + directory already in place. + """ + pass + + @abstractmethod + def heuristic_import( + self, + items_to_import: set[str], + prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, + ) -> dict[str, AddModelResult]: + """Import a list of paths, repo_ids or URLs. Returns the set of + successfully imported items. + :param items_to_import: Set of strings corresponding to models to be imported. + :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. + + The prediction type helper is necessary to distinguish between + models based on Stable Diffusion 2 Base (requiring + SchedulerPredictionType.Epsilson) and Stable Diffusion 768 + (requiring SchedulerPredictionType.VPrediction). It is + generally impossible to do this programmatically, so the + prediction_type_helper usually asks the user to choose. + + The result is a set of successfully installed models. Each element + of the set is a dict corresponding to the newly-created OmegaConf stanza for + that model. 
+ """ + pass + + @abstractmethod + def merge_models( + self, + model_names: List[str] = Field( + default=None, min_items=2, max_items=3, description="List of model names to merge" + ), + base_model: Union[BaseModelType, str] = Field( + default=None, description="Base model shared by all models to be merged" + ), + merged_model_name: str = Field(default=None, description="Name of destination model after merging"), + alpha: Optional[float] = 0.5, + interp: Optional[MergeInterpolationMethod] = None, + force: Optional[bool] = False, + merge_dest_directory: Optional[Path] = None, + ) -> AddModelResult: + """ + Merge two to three diffusrs pipeline models and save as a new model. + :param model_names: List of 2-3 models to merge + :param base_model: Base model to use for all models + :param merged_model_name: Name of destination merged model + :param alpha: Alpha strength to apply to 2d and 3d model + :param interp: Interpolation method. None (default) + :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) + """ + pass + + @abstractmethod + def search_for_models(self, directory: Path) -> List[Path]: + """ + Return list of all models found in the designated directory. + """ + pass + + @abstractmethod + def sync_to_config(self): + """ + Re-read models.yaml, rescan the models directory, and reimport models + in the autoimport directories. Call after making changes outside the + model manager API. + """ + pass + + @abstractmethod + def collect_cache_stats(self, cache_stats: CacheStats): + """ + Reset model cache statistics for graph with graph_id. + """ + pass + + @abstractmethod + def commit(self, conf_file: Optional[Path] = None) -> None: + """ + Write current configuration out to the indicated file. + If no conf_file is provided, then replaces the + original file/database used to initialize the object. 
+ """ + pass diff --git a/invokeai/app/services/model_manager_service.py b/invokeai/app/services/model_manager/model_manager_default.py similarity index 62% rename from invokeai/app/services/model_manager_service.py rename to invokeai/app/services/model_manager/model_manager_default.py index 143fa8f357..263f804b4d 100644 --- a/invokeai/app/services/model_manager_service.py +++ b/invokeai/app/services/model_manager/model_manager_default.py @@ -2,16 +2,15 @@ from __future__ import annotations -from abc import ABC, abstractmethod from logging import Logger from pathlib import Path -from types import ModuleType from typing import TYPE_CHECKING, Callable, List, Literal, Optional, Tuple, Union import torch from pydantic import Field -from invokeai.app.models.exceptions import CanceledException +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException from invokeai.backend.model_management import ( AddModelResult, BaseModelType, @@ -26,273 +25,12 @@ from invokeai.backend.model_management import ( ) from invokeai.backend.model_management.model_cache import CacheStats from invokeai.backend.model_management.model_search import FindModels +from invokeai.backend.util import choose_precision, choose_torch_device -from ...backend.util import choose_precision, choose_torch_device -from .config import InvokeAIAppConfig +from .model_manager_base import ModelManagerServiceBase if TYPE_CHECKING: - from ..invocations.baseinvocation import BaseInvocation, InvocationContext - - -class ModelManagerServiceBase(ABC): - """Responsible for managing models on disk and in memory""" - - @abstractmethod - def __init__( - self, - config: InvokeAIAppConfig, - logger: ModuleType, - ): - """ - Initialize with the path to the models.yaml config file. - Optional parameters are the torch device type, precision, max_models, - and sequential_offload boolean. 
Note that the default device - type and precision are set up for a CUDA system running at half precision. - """ - pass - - @abstractmethod - def get_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel: Optional[SubModelType] = None, - node: Optional[BaseInvocation] = None, - context: Optional[InvocationContext] = None, - ) -> ModelInfo: - """Retrieve the indicated model with name and type. - submodel can be used to get a part (such as the vae) - of a diffusers pipeline.""" - pass - - @property - @abstractmethod - def logger(self): - pass - - @abstractmethod - def model_exists( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> bool: - pass - - @abstractmethod - def model_info(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: - """ - Given a model name returns a dict-like (OmegaConf) object describing it. - Uses the exact format as the omegaconf stanza. - """ - pass - - @abstractmethod - def list_models(self, base_model: Optional[BaseModelType] = None, model_type: Optional[ModelType] = None) -> dict: - """ - Return a dict of models in the format: - { model_type1: - { model_name1: {'status': 'active'|'cached'|'not loaded', - 'model_name' : name, - 'model_type' : SDModelType, - 'description': description, - 'format': 'folder'|'safetensors'|'ckpt' - }, - model_name2: { etc } - }, - model_type2: - { model_name_n: etc - } - """ - pass - - @abstractmethod - def list_model(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: - """ - Return information about the model using the same format as list_models() - """ - pass - - @abstractmethod - def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]: - """ - Returns a list of all the model names known. 
- """ - pass - - @abstractmethod - def add_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - clobber: bool = False, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with an - assertion error if the name already exists. Pass clobber=True to overwrite. - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. - """ - pass - - @abstractmethod - def update_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with a - ModelNotFoundException if the name does not already exist. - - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. - """ - pass - - @abstractmethod - def del_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ): - """ - Delete the named model from configuration. If delete_files is true, - then the underlying weight file or diffusers directory will be deleted - as well. Call commit() to write to disk. - """ - pass - - @abstractmethod - def rename_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - new_name: str, - ): - """ - Rename the indicated model. - """ - pass - - @abstractmethod - def list_checkpoint_configs(self) -> List[Path]: - """ - List the checkpoint config paths from ROOT/configs/stable-diffusion. 
- """ - pass - - @abstractmethod - def convert_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: Literal[ModelType.Main, ModelType.Vae], - ) -> AddModelResult: - """ - Convert a checkpoint file into a diffusers folder, deleting the cached - version and deleting the original checkpoint file if it is in the models - directory. - :param model_name: Name of the model to convert - :param base_model: Base model type - :param model_type: Type of model ['vae' or 'main'] - - This will raise a ValueError unless the model is not a checkpoint. It will - also raise a ValueError in the event that there is a similarly-named diffusers - directory already in place. - """ - pass - - @abstractmethod - def heuristic_import( - self, - items_to_import: set[str], - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - ) -> dict[str, AddModelResult]: - """Import a list of paths, repo_ids or URLs. Returns the set of - successfully imported items. - :param items_to_import: Set of strings corresponding to models to be imported. - :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. - - The prediction type helper is necessary to distinguish between - models based on Stable Diffusion 2 Base (requiring - SchedulerPredictionType.Epsilson) and Stable Diffusion 768 - (requiring SchedulerPredictionType.VPrediction). It is - generally impossible to do this programmatically, so the - prediction_type_helper usually asks the user to choose. - - The result is a set of successfully installed models. Each element - of the set is a dict corresponding to the newly-created OmegaConf stanza for - that model. 
- """ - pass - - @abstractmethod - def merge_models( - self, - model_names: List[str] = Field( - default=None, min_items=2, max_items=3, description="List of model names to merge" - ), - base_model: Union[BaseModelType, str] = Field( - default=None, description="Base model shared by all models to be merged" - ), - merged_model_name: str = Field(default=None, description="Name of destination model after merging"), - alpha: Optional[float] = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: Optional[bool] = False, - merge_dest_directory: Optional[Path] = None, - ) -> AddModelResult: - """ - Merge two to three diffusrs pipeline models and save as a new model. - :param model_names: List of 2-3 models to merge - :param base_model: Base model to use for all models - :param merged_model_name: Name of destination merged model - :param alpha: Alpha strength to apply to 2d and 3d model - :param interp: Interpolation method. None (default) - :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) - """ - pass - - @abstractmethod - def search_for_models(self, directory: Path) -> List[Path]: - """ - Return list of all models found in the designated directory. - """ - pass - - @abstractmethod - def sync_to_config(self): - """ - Re-read models.yaml, rescan the models directory, and reimport models - in the autoimport directories. Call after making changes outside the - model manager API. - """ - pass - - @abstractmethod - def collect_cache_stats(self, cache_stats: CacheStats): - """ - Reset model cache statistics for graph with graph_id. - """ - pass - - @abstractmethod - def commit(self, conf_file: Optional[Path] = None) -> None: - """ - Write current configuration out to the indicated file. - If no conf_file is provided, then replaces the - original file/database used to initialize the object. 
- """ - pass + from invokeai.app.invocations.baseinvocation import InvocationContext # simple implementation diff --git a/invokeai/app/services/names/__init__.py b/invokeai/app/services/names/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/names/names_base.py b/invokeai/app/services/names/names_base.py new file mode 100644 index 0000000000..f892c43c55 --- /dev/null +++ b/invokeai/app/services/names/names_base.py @@ -0,0 +1,11 @@ +from abc import ABC, abstractmethod + + +class NameServiceBase(ABC): + """Low-level service responsible for naming resources (images, latents, etc).""" + + # TODO: Add customizable naming schemes + @abstractmethod + def create_image_name(self) -> str: + """Creates a name for an image.""" + pass diff --git a/invokeai/app/services/names/names_common.py b/invokeai/app/services/names/names_common.py new file mode 100644 index 0000000000..7c69f8abe8 --- /dev/null +++ b/invokeai/app/services/names/names_common.py @@ -0,0 +1,8 @@ +from enum import Enum, EnumMeta + + +class ResourceType(str, Enum, metaclass=EnumMeta): + """Enum for resource types.""" + + IMAGE = "image" + LATENT = "latent" diff --git a/invokeai/app/services/names/names_default.py b/invokeai/app/services/names/names_default.py new file mode 100644 index 0000000000..104268c8bd --- /dev/null +++ b/invokeai/app/services/names/names_default.py @@ -0,0 +1,13 @@ +from invokeai.app.util.misc import uuid_string + +from .names_base import NameServiceBase + + +class SimpleNameService(NameServiceBase): + """Creates image names from UUIDs.""" + + # TODO: Add customizable naming schemes + def create_image_name(self) -> str: + uuid_str = uuid_string() + filename = f"{uuid_str}.png" + return filename diff --git a/invokeai/app/services/resource_name.py b/invokeai/app/services/resource_name.py deleted file mode 100644 index a17b1a084e..0000000000 --- a/invokeai/app/services/resource_name.py +++ /dev/null @@ -1,31 +0,0 @@ -from abc import ABC, 
abstractmethod -from enum import Enum, EnumMeta - -from invokeai.app.util.misc import uuid_string - - -class ResourceType(str, Enum, metaclass=EnumMeta): - """Enum for resource types.""" - - IMAGE = "image" - LATENT = "latent" - - -class NameServiceBase(ABC): - """Low-level service responsible for naming resources (images, latents, etc).""" - - # TODO: Add customizable naming schemes - @abstractmethod - def create_image_name(self) -> str: - """Creates a name for an image.""" - pass - - -class SimpleNameService(NameServiceBase): - """Creates image names from UUIDs.""" - - # TODO: Add customizable naming schemes - def create_image_name(self) -> str: - uuid_str = uuid_string() - filename = f"{uuid_str}.png" - return filename diff --git a/invokeai/app/services/session_processor/session_processor_default.py b/invokeai/app/services/session_processor/session_processor_default.py index 065b80e1a9..09aaefc0da 100644 --- a/invokeai/app/services/session_processor/session_processor_default.py +++ b/invokeai/app/services/session_processor/session_processor_default.py @@ -7,7 +7,7 @@ from typing import Optional from fastapi_events.handlers.local import local_handler from fastapi_events.typing import Event as FastAPIEvent -from invokeai.app.services.events import EventServiceBase +from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem from ..invoker import Invoker diff --git a/invokeai/app/services/session_queue/session_queue_base.py b/invokeai/app/services/session_queue/session_queue_base.py index 5df6f563ac..b5272f1868 100644 --- a/invokeai/app/services/session_queue/session_queue_base.py +++ b/invokeai/app/services/session_queue/session_queue_base.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod from typing import Optional -from invokeai.app.services.graph import Graph from invokeai.app.services.session_queue.session_queue_common import ( QUEUE_ITEM_STATUS, Batch, @@ 
-18,6 +17,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueItemDTO, SessionQueueStatus, ) +from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index a1eada6523..2d40a5b0c4 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, StrictStr, parse_raw_as, root_validator, from pydantic.json import pydantic_encoder from invokeai.app.invocations.baseinvocation import BaseInvocation -from invokeai.app.services.graph import Graph, GraphExecutionState, NodeNotFoundError +from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError from invokeai.app.util.misc import uuid_string # region Errors diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 44ae50f007..0e12382392 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -5,8 +5,7 @@ from typing import Optional, Union, cast from fastapi_events.handlers.local import local_handler from fastapi_events.typing import Event as FastAPIEvent -from invokeai.app.services.events import EventServiceBase -from invokeai.app.services.graph import Graph +from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.invoker import Invoker from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase from invokeai.app.services.session_queue.session_queue_common import ( @@ -29,8 +28,9 @@ from invokeai.app.services.session_queue.session_queue_common import ( calc_session_count, prepare_values_to_insert, 
) -from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults +from invokeai.app.services.shared.sqlite import SqliteDatabase class SqliteSessionQueue(SessionQueueBase): diff --git a/invokeai/app/services/default_graphs.py b/invokeai/app/services/shared/default_graphs.py similarity index 90% rename from invokeai/app/services/default_graphs.py rename to invokeai/app/services/shared/default_graphs.py index ad8d220599..b2d0a1f0b6 100644 --- a/invokeai/app/services/default_graphs.py +++ b/invokeai/app/services/shared/default_graphs.py @@ -1,10 +1,11 @@ -from ..invocations.compel import CompelInvocation -from ..invocations.image import ImageNSFWBlurInvocation -from ..invocations.latent import DenoiseLatentsInvocation, LatentsToImageInvocation -from ..invocations.noise import NoiseInvocation -from ..invocations.primitives import IntegerInvocation +from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC + +from ...invocations.compel import CompelInvocation +from ...invocations.image import ImageNSFWBlurInvocation +from ...invocations.latent import DenoiseLatentsInvocation, LatentsToImageInvocation +from ...invocations.noise import NoiseInvocation +from ...invocations.primitives import IntegerInvocation from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph -from .item_storage import ItemStorageABC default_text_to_image_graph_id = "539b2af5-2b4d-4d8c-8071-e54a3255fc74" diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/shared/graph.py similarity index 99% rename from invokeai/app/services/graph.py rename to invokeai/app/services/shared/graph.py index ab479300fc..dab045af9d 100644 --- a/invokeai/app/services/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -8,11 +8,9 @@ import networkx as nx from pydantic import BaseModel, root_validator, validator from 
pydantic.fields import Field -from invokeai.app.util.misc import uuid_string - # Importing * is bad karma but needed here for node detection -from ..invocations import * # noqa: F401 F403 -from ..invocations.baseinvocation import ( +from invokeai.app.invocations import * # noqa: F401 F403 +from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, Input, @@ -23,6 +21,7 @@ from ..invocations.baseinvocation import ( invocation, invocation_output, ) +from invokeai.app.util.misc import uuid_string # in 3.10 this would be "from types import NoneType" NoneType = type(None) diff --git a/invokeai/app/services/shared/sqlite.py b/invokeai/app/services/shared/sqlite.py index 6b3b86f25f..c41dbbe606 100644 --- a/invokeai/app/services/shared/sqlite.py +++ b/invokeai/app/services/shared/sqlite.py @@ -4,6 +4,8 @@ from logging import Logger from invokeai.app.services.config import InvokeAIAppConfig +sqlite_memory = ":memory:" + class SqliteDatabase: conn: sqlite3.Connection @@ -16,7 +18,7 @@ class SqliteDatabase: self._config = config if self._config.use_memory_db: - location = ":memory:" + location = sqlite_memory logger.info("Using in-memory database") else: db_path = self._config.db_path diff --git a/invokeai/app/services/urls/__init__.py b/invokeai/app/services/urls/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/app/services/urls/urls_base.py b/invokeai/app/services/urls/urls_base.py new file mode 100644 index 0000000000..c39ba055f3 --- /dev/null +++ b/invokeai/app/services/urls/urls_base.py @@ -0,0 +1,10 @@ +from abc import ABC, abstractmethod + + +class UrlServiceBase(ABC): + """Responsible for building URLs for resources.""" + + @abstractmethod + def get_image_url(self, image_name: str, thumbnail: bool = False) -> str: + """Gets the URL for an image or thumbnail.""" + pass diff --git a/invokeai/app/services/urls.py b/invokeai/app/services/urls/urls_default.py similarity index 64% rename from 
invokeai/app/services/urls.py rename to invokeai/app/services/urls/urls_default.py index 7688b3bdd3..801aeac560 100644 --- a/invokeai/app/services/urls.py +++ b/invokeai/app/services/urls/urls_default.py @@ -1,14 +1,6 @@ import os -from abc import ABC, abstractmethod - -class UrlServiceBase(ABC): - """Responsible for building URLs for resources.""" - - @abstractmethod - def get_image_url(self, image_name: str, thumbnail: bool = False) -> str: - """Gets the URL for an image or thumbnail.""" - pass +from .urls_base import UrlServiceBase class LocalUrlService(UrlServiceBase): diff --git a/invokeai/app/util/metadata.py b/invokeai/app/util/metadata.py index 5ca5f14e12..15951cb009 100644 --- a/invokeai/app/util/metadata.py +++ b/invokeai/app/util/metadata.py @@ -3,7 +3,7 @@ from typing import Optional from pydantic import ValidationError -from invokeai.app.services.graph import Edge +from invokeai.app.services.shared.graph import Edge def get_metadata_graph_from_raw_session(session_raw: str) -> Optional[dict]: diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 6d4a857491..f166206d52 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -1,8 +1,7 @@ import torch from PIL import Image -from invokeai.app.models.exceptions import CanceledException -from invokeai.app.models.image import ProgressImage +from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException, ProgressImage from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 2c14f458a7..101bcdf391 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -3,449 +3,444 @@ * Do not make direct changes to the file. 
*/ - export type paths = { - "/api/v1/sessions/": { + '/api/v1/sessions/': { /** * List Sessions * @deprecated * @description Gets a list of sessions, optionally searching */ - get: operations["list_sessions"]; + get: operations['list_sessions']; /** * Create Session * @deprecated * @description Creates a new session, optionally initializing it with an invocation graph */ - post: operations["create_session"]; + post: operations['create_session']; }; - "/api/v1/sessions/{session_id}": { + '/api/v1/sessions/{session_id}': { /** * Get Session * @deprecated * @description Gets a session */ - get: operations["get_session"]; + get: operations['get_session']; }; - "/api/v1/sessions/{session_id}/nodes": { + '/api/v1/sessions/{session_id}/nodes': { /** * Add Node * @deprecated * @description Adds a node to the graph */ - post: operations["add_node"]; + post: operations['add_node']; }; - "/api/v1/sessions/{session_id}/nodes/{node_path}": { + '/api/v1/sessions/{session_id}/nodes/{node_path}': { /** * Update Node * @deprecated * @description Updates a node in the graph and removes all linked edges */ - put: operations["update_node"]; + put: operations['update_node']; /** * Delete Node * @deprecated * @description Deletes a node in the graph and removes all linked edges */ - delete: operations["delete_node"]; + delete: operations['delete_node']; }; - "/api/v1/sessions/{session_id}/edges": { + '/api/v1/sessions/{session_id}/edges': { /** * Add Edge * @deprecated * @description Adds an edge to the graph */ - post: operations["add_edge"]; + post: operations['add_edge']; }; - "/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}": { + '/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}': { /** * Delete Edge * @deprecated * @description Deletes an edge from the graph */ - delete: operations["delete_edge"]; + delete: operations['delete_edge']; }; - "/api/v1/sessions/{session_id}/invoke": { + 
'/api/v1/sessions/{session_id}/invoke': { /** * Invoke Session * @deprecated * @description Invokes a session */ - put: operations["invoke_session"]; + put: operations['invoke_session']; /** * Cancel Session Invoke * @deprecated * @description Invokes a session */ - delete: operations["cancel_session_invoke"]; + delete: operations['cancel_session_invoke']; }; - "/api/v1/utilities/dynamicprompts": { + '/api/v1/utilities/dynamicprompts': { /** * Parse Dynamicprompts * @description Creates a batch process */ - post: operations["parse_dynamicprompts"]; + post: operations['parse_dynamicprompts']; }; - "/api/v1/models/": { + '/api/v1/models/': { /** * List Models * @description Gets a list of models */ - get: operations["list_models"]; + get: operations['list_models']; }; - "/api/v1/models/{base_model}/{model_type}/{model_name}": { + '/api/v1/models/{base_model}/{model_type}/{model_name}': { /** * Delete Model * @description Delete Model */ - delete: operations["del_model"]; + delete: operations['del_model']; /** * Update Model * @description Update model contents with a new config. If the model name or base fields are changed, then the model is renamed. */ - patch: operations["update_model"]; + patch: operations['update_model']; }; - "/api/v1/models/import": { + '/api/v1/models/import': { /** * Import Model * @description Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically */ - post: operations["import_model"]; + post: operations['import_model']; }; - "/api/v1/models/add": { + '/api/v1/models/add': { /** * Add Model * @description Add a model using the configuration information appropriate for its type. 
Only local models can be added by path */ - post: operations["add_model"]; + post: operations['add_model']; }; - "/api/v1/models/convert/{base_model}/{model_type}/{model_name}": { + '/api/v1/models/convert/{base_model}/{model_type}/{model_name}': { /** * Convert Model * @description Convert a checkpoint model into a diffusers model, optionally saving to the indicated destination directory, or `models` if none. */ - put: operations["convert_model"]; + put: operations['convert_model']; }; - "/api/v1/models/search": { + '/api/v1/models/search': { /** Search For Models */ - get: operations["search_for_models"]; + get: operations['search_for_models']; }; - "/api/v1/models/ckpt_confs": { + '/api/v1/models/ckpt_confs': { /** * List Ckpt Configs * @description Return a list of the legacy checkpoint configuration files stored in `ROOT/configs/stable-diffusion`, relative to ROOT. */ - get: operations["list_ckpt_configs"]; + get: operations['list_ckpt_configs']; }; - "/api/v1/models/sync": { + '/api/v1/models/sync': { /** * Sync To Config * @description Call after making changes to models.yaml, autoimport directories or models directory to synchronize * in-memory data structures with disk data structures. 
*/ - post: operations["sync_to_config"]; + post: operations['sync_to_config']; }; - "/api/v1/models/merge/{base_model}": { + '/api/v1/models/merge/{base_model}': { /** * Merge Models * @description Convert a checkpoint model into a diffusers model */ - put: operations["merge_models"]; + put: operations['merge_models']; }; - "/api/v1/images/upload": { + '/api/v1/images/upload': { /** * Upload Image * @description Uploads an image */ - post: operations["upload_image"]; + post: operations['upload_image']; }; - "/api/v1/images/i/{image_name}": { + '/api/v1/images/i/{image_name}': { /** * Get Image Dto * @description Gets an image's DTO */ - get: operations["get_image_dto"]; + get: operations['get_image_dto']; /** * Delete Image * @description Deletes an image */ - delete: operations["delete_image"]; + delete: operations['delete_image']; /** * Update Image * @description Updates an image */ - patch: operations["update_image"]; + patch: operations['update_image']; }; - "/api/v1/images/clear-intermediates": { + '/api/v1/images/clear-intermediates': { /** * Clear Intermediates * @description Clears all intermediates */ - post: operations["clear_intermediates"]; + post: operations['clear_intermediates']; }; - "/api/v1/images/i/{image_name}/metadata": { + '/api/v1/images/i/{image_name}/metadata': { /** * Get Image Metadata * @description Gets an image's metadata */ - get: operations["get_image_metadata"]; + get: operations['get_image_metadata']; }; - "/api/v1/images/i/{image_name}/full": { + '/api/v1/images/i/{image_name}/full': { /** * Get Image Full * @description Gets a full-resolution image file */ - get: operations["get_image_full"]; + get: operations['get_image_full']; /** * Get Image Full * @description Gets a full-resolution image file */ - head: operations["get_image_full"]; + head: operations['get_image_full']; }; - "/api/v1/images/i/{image_name}/thumbnail": { + '/api/v1/images/i/{image_name}/thumbnail': { /** * Get Image Thumbnail * @description Gets a thumbnail 
image file */ - get: operations["get_image_thumbnail"]; + get: operations['get_image_thumbnail']; }; - "/api/v1/images/i/{image_name}/urls": { + '/api/v1/images/i/{image_name}/urls': { /** * Get Image Urls * @description Gets an image and thumbnail URL */ - get: operations["get_image_urls"]; + get: operations['get_image_urls']; }; - "/api/v1/images/": { + '/api/v1/images/': { /** * List Image Dtos * @description Gets a list of image DTOs */ - get: operations["list_image_dtos"]; + get: operations['list_image_dtos']; }; - "/api/v1/images/delete": { + '/api/v1/images/delete': { /** Delete Images From List */ - post: operations["delete_images_from_list"]; + post: operations['delete_images_from_list']; }; - "/api/v1/images/star": { + '/api/v1/images/star': { /** Star Images In List */ - post: operations["star_images_in_list"]; + post: operations['star_images_in_list']; }; - "/api/v1/images/unstar": { + '/api/v1/images/unstar': { /** Unstar Images In List */ - post: operations["unstar_images_in_list"]; + post: operations['unstar_images_in_list']; }; - "/api/v1/images/download": { - /** Download Images From List */ - post: operations["download_images_from_list"]; - }; - "/api/v1/boards/": { + '/api/v1/boards/': { /** * List Boards * @description Gets a list of boards */ - get: operations["list_boards"]; + get: operations['list_boards']; /** * Create Board * @description Creates a board */ - post: operations["create_board"]; + post: operations['create_board']; }; - "/api/v1/boards/{board_id}": { + '/api/v1/boards/{board_id}': { /** * Get Board * @description Gets a board */ - get: operations["get_board"]; + get: operations['get_board']; /** * Delete Board * @description Deletes a board */ - delete: operations["delete_board"]; + delete: operations['delete_board']; /** * Update Board * @description Updates a board */ - patch: operations["update_board"]; + patch: operations['update_board']; }; - "/api/v1/boards/{board_id}/image_names": { + 
'/api/v1/boards/{board_id}/image_names': { /** * List All Board Image Names * @description Gets a list of images for a board */ - get: operations["list_all_board_image_names"]; + get: operations['list_all_board_image_names']; }; - "/api/v1/board_images/": { + '/api/v1/board_images/': { /** * Add Image To Board * @description Creates a board_image */ - post: operations["add_image_to_board"]; + post: operations['add_image_to_board']; /** * Remove Image From Board * @description Removes an image from its board, if it had one */ - delete: operations["remove_image_from_board"]; + delete: operations['remove_image_from_board']; }; - "/api/v1/board_images/batch": { + '/api/v1/board_images/batch': { /** * Add Images To Board * @description Adds a list of images to a board */ - post: operations["add_images_to_board"]; + post: operations['add_images_to_board']; }; - "/api/v1/board_images/batch/delete": { + '/api/v1/board_images/batch/delete': { /** * Remove Images From Board * @description Removes a list of images from their board, if they had one */ - post: operations["remove_images_from_board"]; + post: operations['remove_images_from_board']; }; - "/api/v1/app/version": { + '/api/v1/app/version': { /** Get Version */ - get: operations["app_version"]; + get: operations['app_version']; }; - "/api/v1/app/config": { + '/api/v1/app/config': { /** Get Config */ - get: operations["get_config"]; + get: operations['get_config']; }; - "/api/v1/app/logging": { + '/api/v1/app/logging': { /** * Get Log Level * @description Returns the log level */ - get: operations["get_log_level"]; + get: operations['get_log_level']; /** * Set Log Level * @description Sets the log verbosity level */ - post: operations["set_log_level"]; + post: operations['set_log_level']; }; - "/api/v1/app/invocation_cache": { + '/api/v1/app/invocation_cache': { /** * Clear Invocation Cache * @description Clears the invocation cache */ - delete: operations["clear_invocation_cache"]; + delete: 
operations['clear_invocation_cache']; }; - "/api/v1/app/invocation_cache/enable": { + '/api/v1/app/invocation_cache/enable': { /** * Enable Invocation Cache * @description Clears the invocation cache */ - put: operations["enable_invocation_cache"]; + put: operations['enable_invocation_cache']; }; - "/api/v1/app/invocation_cache/disable": { + '/api/v1/app/invocation_cache/disable': { /** * Disable Invocation Cache * @description Clears the invocation cache */ - put: operations["disable_invocation_cache"]; + put: operations['disable_invocation_cache']; }; - "/api/v1/app/invocation_cache/status": { + '/api/v1/app/invocation_cache/status': { /** * Get Invocation Cache Status * @description Clears the invocation cache */ - get: operations["get_invocation_cache_status"]; + get: operations['get_invocation_cache_status']; }; - "/api/v1/queue/{queue_id}/enqueue_graph": { + '/api/v1/queue/{queue_id}/enqueue_graph': { /** * Enqueue Graph * @description Enqueues a graph for single execution. */ - post: operations["enqueue_graph"]; + post: operations['enqueue_graph']; }; - "/api/v1/queue/{queue_id}/enqueue_batch": { + '/api/v1/queue/{queue_id}/enqueue_batch': { /** * Enqueue Batch * @description Processes a batch and enqueues the output graphs for execution. 
*/ - post: operations["enqueue_batch"]; + post: operations['enqueue_batch']; }; - "/api/v1/queue/{queue_id}/list": { + '/api/v1/queue/{queue_id}/list': { /** * List Queue Items * @description Gets all queue items (without graphs) */ - get: operations["list_queue_items"]; + get: operations['list_queue_items']; }; - "/api/v1/queue/{queue_id}/processor/resume": { + '/api/v1/queue/{queue_id}/processor/resume': { /** * Resume * @description Resumes session processor */ - put: operations["resume"]; + put: operations['resume']; }; - "/api/v1/queue/{queue_id}/processor/pause": { + '/api/v1/queue/{queue_id}/processor/pause': { /** * Pause * @description Pauses session processor */ - put: operations["pause"]; + put: operations['pause']; }; - "/api/v1/queue/{queue_id}/cancel_by_batch_ids": { + '/api/v1/queue/{queue_id}/cancel_by_batch_ids': { /** * Cancel By Batch Ids * @description Immediately cancels all queue items from the given batch ids */ - put: operations["cancel_by_batch_ids"]; + put: operations['cancel_by_batch_ids']; }; - "/api/v1/queue/{queue_id}/clear": { + '/api/v1/queue/{queue_id}/clear': { /** * Clear * @description Clears the queue entirely, immediately canceling the currently-executing session */ - put: operations["clear"]; + put: operations['clear']; }; - "/api/v1/queue/{queue_id}/prune": { + '/api/v1/queue/{queue_id}/prune': { /** * Prune * @description Prunes all completed or errored queue items */ - put: operations["prune"]; + put: operations['prune']; }; - "/api/v1/queue/{queue_id}/current": { + '/api/v1/queue/{queue_id}/current': { /** * Get Current Queue Item * @description Gets the currently execution queue item */ - get: operations["get_current_queue_item"]; + get: operations['get_current_queue_item']; }; - "/api/v1/queue/{queue_id}/next": { + '/api/v1/queue/{queue_id}/next': { /** * Get Next Queue Item * @description Gets the next queue item, without executing it */ - get: operations["get_next_queue_item"]; + get: operations['get_next_queue_item']; 
}; - "/api/v1/queue/{queue_id}/status": { + '/api/v1/queue/{queue_id}/status': { /** * Get Queue Status * @description Gets the status of the session queue */ - get: operations["get_queue_status"]; + get: operations['get_queue_status']; }; - "/api/v1/queue/{queue_id}/b/{batch_id}/status": { + '/api/v1/queue/{queue_id}/b/{batch_id}/status': { /** * Get Batch Status * @description Gets the status of the session queue */ - get: operations["get_batch_status"]; + get: operations['get_batch_status']; }; - "/api/v1/queue/{queue_id}/i/{item_id}": { + '/api/v1/queue/{queue_id}/i/{item_id}': { /** * Get Queue Item * @description Gets a queue item */ - get: operations["get_queue_item"]; + get: operations['get_queue_item']; }; - "/api/v1/queue/{queue_id}/i/{item_id}/cancel": { + '/api/v1/queue/{queue_id}/i/{item_id}/cancel': { /** * Cancel Queue Item * @description Deletes a queue item */ - put: operations["cancel_queue_item"]; + put: operations['cancel_queue_item']; }; }; @@ -510,7 +505,7 @@ export type components = { * @default add * @enum {string} */ - type: "add"; + type: 'add'; }; /** * AppConfig @@ -526,7 +521,7 @@ export type components = { * Upscaling Methods * @description List of upscaling methods */ - upscaling_methods: components["schemas"]["Upscaler"][]; + upscaling_methods: components['schemas']['Upscaler'][]; /** * Nsfw Methods * @description List of NSFW checking methods @@ -554,7 +549,7 @@ export type components = { * @description An enumeration. * @enum {string} */ - BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner"; + BaseModelType: 'any' | 'sd-1' | 'sd-2' | 'sdxl' | 'sdxl-refiner'; /** Batch */ Batch: { /** @@ -566,12 +561,12 @@ export type components = { * Data * @description The batch data collection. 
*/ - data?: components["schemas"]["BatchDatum"][][]; + data?: components['schemas']['BatchDatum'][][]; /** * Graph * @description The graph to initialize the session with */ - graph: components["schemas"]["Graph"]; + graph: components['schemas']['Graph']; /** * Runs * @description Int stating how many times to iterate through all possible batch indices @@ -685,7 +680,7 @@ export type components = { * @default RGB * @enum {string} */ - mode?: "RGB" | "RGBA"; + mode?: 'RGB' | 'RGBA'; /** * Color * @description The color of the image @@ -696,13 +691,13 @@ export type components = { * "a": 255 * } */ - color?: components["schemas"]["ColorField"]; + color?: components['schemas']['ColorField']; /** * Type * @default blank_image * @enum {string} */ - type: "blank_image"; + type: 'blank_image'; }; /** * Blend Latents @@ -735,12 +730,12 @@ export type components = { * Latents A * @description Latents tensor */ - latents_a?: components["schemas"]["LatentsField"]; + latents_a?: components['schemas']['LatentsField']; /** * Latents B * @description Latents tensor */ - latents_b?: components["schemas"]["LatentsField"]; + latents_b?: components['schemas']['LatentsField']; /** * Alpha * @description Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B. 
@@ -752,7 +747,7 @@ export type components = { * @default lblend * @enum {string} */ - type: "lblend"; + type: 'lblend'; }; /** BoardChanges */ BoardChanges: { @@ -880,7 +875,7 @@ export type components = { * Batch * @description Batch to process */ - batch: components["schemas"]["Batch"]; + batch: components['schemas']['Batch']; /** * Prepend * @description Whether or not to prepend this batch in the queue @@ -894,7 +889,7 @@ export type components = { * Graph * @description The graph to enqueue */ - graph: components["schemas"]["Graph"]; + graph: components['schemas']['Graph']; /** * Prepend * @description Whether or not to prepend this batch in the queue @@ -914,7 +909,7 @@ export type components = { * @description Prediction type for SDv2 checkpoints and rare SDv1 checkpoints * @enum {string} */ - prediction_type?: "v_prediction" | "epsilon" | "sample"; + prediction_type?: 'v_prediction' | 'epsilon' | 'sample'; }; /** Body_merge_models */ Body_merge_models: { @@ -935,7 +930,7 @@ export type components = { */ alpha?: number; /** @description Interpolation method */ - interp: components["schemas"]["MergeInterpolationMethod"]; + interp: components['schemas']['MergeInterpolationMethod']; /** * Force * @description Force merging of models created with different versions of diffusers @@ -1045,7 +1040,7 @@ export type components = { * @default boolean_collection * @enum {string} */ - type: "boolean_collection"; + type: 'boolean_collection'; }; /** * BooleanCollectionOutput @@ -1062,7 +1057,7 @@ export type components = { * @default boolean_collection_output * @enum {string} */ - type: "boolean_collection_output"; + type: 'boolean_collection_output'; }; /** * Boolean Primitive @@ -1102,7 +1097,7 @@ export type components = { * @default boolean * @enum {string} */ - type: "boolean"; + type: 'boolean'; }; /** * BooleanOutput @@ -1119,18 +1114,18 @@ export type components = { * @default boolean_output * @enum {string} */ - type: "boolean_output"; + type: 'boolean_output'; 
}; /** CLIPVisionModelDiffusersConfig */ CLIPVisionModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "clip_vision"; + model_type: 'clip_vision'; /** Path */ path: string; /** Description */ @@ -1139,8 +1134,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; }; /** CLIPVisionModelField */ CLIPVisionModelField: { @@ -1150,7 +1145,7 @@ export type components = { */ model_name: string; /** @description Base model (usually 'Any') */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** * CV2 Infill @@ -1183,13 +1178,13 @@ export type components = { * Image * @description The image to infill */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default infill_cv2 * @enum {string} */ - type: "infill_cv2"; + type: 'infill_cv2'; }; /** * CancelByBatchIDsResult @@ -1233,13 +1228,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default canny_image_processor * @enum {string} */ - type: "canny_image_processor"; + type: 'canny_image_processor'; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -1270,12 +1265,12 @@ export type components = { * Tokenizer * @description Info to load tokenizer submodel */ - tokenizer: components["schemas"]["ModelInfo"]; + tokenizer: components['schemas']['ModelInfo']; /** * Text Encoder * @description Info to load text_encoder submodel */ - text_encoder: components["schemas"]["ModelInfo"]; + text_encoder: 
components['schemas']['ModelInfo']; /** * Skipped Layers * @description Number of skipped layers in text_encoder @@ -1285,7 +1280,7 @@ export type components = { * Loras * @description Loras to apply on model loading */ - loras: components["schemas"]["LoraInfo"][]; + loras: components['schemas']['LoraInfo'][]; }; /** * CLIP Skip @@ -1318,7 +1313,7 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * Skipped Layers * @description Number of layers to skip in text encoder @@ -1330,7 +1325,7 @@ export type components = { * @default clip_skip * @enum {string} */ - type: "clip_skip"; + type: 'clip_skip'; }; /** * ClipSkipInvocationOutput @@ -1341,13 +1336,13 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * Type * @default clip_skip_output * @enum {string} */ - type: "clip_skip_output"; + type: 'clip_skip_output'; }; /** * CollectInvocation @@ -1391,7 +1386,7 @@ export type components = { * @default collect * @enum {string} */ - type: "collect"; + type: 'collect'; }; /** * CollectInvocationOutput @@ -1410,7 +1405,7 @@ export type components = { * @default collect_output * @enum {string} */ - type: "collect_output"; + type: 'collect_output'; }; /** * ColorCollectionOutput @@ -1421,13 +1416,13 @@ export type components = { * Collection * @description The output colors */ - collection: components["schemas"]["ColorField"][]; + collection: components['schemas']['ColorField'][]; /** * Type * @default color_collection_output * @enum {string} */ - type: "color_collection_output"; + type: 'color_collection_output'; }; /** * Color Correct @@ -1461,17 +1456,17 @@ export type components = { * Image * @description The image to color-correct */ - image?: 
components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Reference * @description Reference image for color-correction */ - reference?: components["schemas"]["ImageField"]; + reference?: components['schemas']['ImageField']; /** * Mask * @description Mask to use when applying color-correction */ - mask?: components["schemas"]["ImageField"]; + mask?: components['schemas']['ImageField']; /** * Mask Blur Radius * @description Mask blur radius @@ -1483,7 +1478,7 @@ export type components = { * @default color_correct * @enum {string} */ - type: "color_correct"; + type: 'color_correct'; }; /** * ColorField @@ -1548,13 +1543,13 @@ export type components = { * "a": 255 * } */ - color?: components["schemas"]["ColorField"]; + color?: components['schemas']['ColorField']; /** * Type * @default color * @enum {string} */ - type: "color"; + type: 'color'; }; /** * Color Map Processor @@ -1587,13 +1582,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default color_map_image_processor * @enum {string} */ - type: "color_map_image_processor"; + type: 'color_map_image_processor'; /** * Color Map Tile Size * @description Tile size @@ -1610,13 +1605,13 @@ export type components = { * Color * @description The output color */ - color: components["schemas"]["ColorField"]; + color: components['schemas']['ColorField']; /** * Type * @default color_output * @enum {string} */ - type: "color_output"; + type: 'color_output'; }; /** * Prompt @@ -1656,12 +1651,12 @@ export type components = { * @default compel * @enum {string} */ - type: "compel"; + type: 'compel'; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; }; /** * Conditioning Collection Primitive @@ -1694,13 +1689,13 @@ export type components 
= { * Collection * @description The collection of conditioning tensors */ - collection?: components["schemas"]["ConditioningField"][]; + collection?: components['schemas']['ConditioningField'][]; /** * Type * @default conditioning_collection * @enum {string} */ - type: "conditioning_collection"; + type: 'conditioning_collection'; }; /** * ConditioningCollectionOutput @@ -1711,13 +1706,13 @@ export type components = { * Collection * @description The output conditioning tensors */ - collection: components["schemas"]["ConditioningField"][]; + collection: components['schemas']['ConditioningField'][]; /** * Type * @default conditioning_collection_output * @enum {string} */ - type: "conditioning_collection_output"; + type: 'conditioning_collection_output'; }; /** * ConditioningField @@ -1761,13 +1756,13 @@ export type components = { * Conditioning * @description Conditioning tensor */ - conditioning?: components["schemas"]["ConditioningField"]; + conditioning?: components['schemas']['ConditioningField']; /** * Type * @default conditioning * @enum {string} */ - type: "conditioning"; + type: 'conditioning'; }; /** * ConditioningOutput @@ -1778,13 +1773,13 @@ export type components = { * Conditioning * @description Conditioning tensor */ - conditioning: components["schemas"]["ConditioningField"]; + conditioning: components['schemas']['ConditioningField']; /** * Type * @default conditioning_output * @enum {string} */ - type: "conditioning_output"; + type: 'conditioning_output'; }; /** * Content Shuffle Processor @@ -1817,13 +1812,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default content_shuffle_image_processor * @enum {string} */ - type: "content_shuffle_image_processor"; + type: 'content_shuffle_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -1861,12 +1856,12 @@ export type components = 
{ * Image * @description The control image */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Control Model * @description The ControlNet model to use */ - control_model: components["schemas"]["ControlNetModelField"]; + control_model: components['schemas']['ControlNetModelField']; /** * Control Weight * @description The weight given to the ControlNet @@ -1891,14 +1886,18 @@ export type components = { * @default balanced * @enum {string} */ - control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; + control_mode?: 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced'; /** * Resize Mode * @description The resize mode to use * @default just_resize * @enum {string} */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + resize_mode?: + | 'just_resize' + | 'crop_resize' + | 'fill_resize' + | 'just_resize_simple'; }; /** * ControlNet @@ -1931,12 +1930,12 @@ export type components = { * Image * @description The control image */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Control Model * @description ControlNet model to load */ - control_model: components["schemas"]["ControlNetModelField"]; + control_model: components['schemas']['ControlNetModelField']; /** * Control Weight * @description The weight given to the ControlNet @@ -1961,31 +1960,35 @@ export type components = { * @default balanced * @enum {string} */ - control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; + control_mode?: 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced'; /** * Resize Mode * @description The resize mode used * @default just_resize * @enum {string} */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + resize_mode?: + | 'just_resize' + | 'crop_resize' + | 'fill_resize' + | 'just_resize_simple'; /** * Type * @default controlnet * @enum {string} */ - type: "controlnet"; 
+ type: 'controlnet'; }; /** ControlNetModelCheckpointConfig */ ControlNetModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "controlnet"; + model_type: 'controlnet'; /** Path */ path: string; /** Description */ @@ -1994,8 +1997,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + model_format: 'checkpoint'; + error?: components['schemas']['ModelError']; /** Config */ config: string; }; @@ -2003,12 +2006,12 @@ export type components = { ControlNetModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "controlnet"; + model_type: 'controlnet'; /** Path */ path: string; /** Description */ @@ -2017,8 +2020,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; }; /** * ControlNetModelField @@ -2031,7 +2034,7 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** * ControlOutput @@ -2042,13 +2045,13 @@ export type components = { * Control * @description ControlNet(s) to apply */ - control: components["schemas"]["ControlField"]; + control: components['schemas']['ControlField']; /** * Type * @default control_output * @enum {string} */ - type: "control_output"; + type: 'control_output'; }; /** * CoreMetadata @@ -2125,32 +2128,32 @@ export type components = { * Model * @description The main model used for inference */ - model: 
components["schemas"]["MainModelField"]; + model: components['schemas']['MainModelField']; /** * Controlnets * @description The ControlNets used for inference */ - controlnets: components["schemas"]["ControlField"][]; + controlnets: components['schemas']['ControlField'][]; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters: components['schemas']['IPAdapterMetadataField'][]; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters: components["schemas"]["T2IAdapterField"][]; + t2iAdapters: components['schemas']['T2IAdapterField'][]; /** * Loras * @description The LoRAs used for inference */ - loras: components["schemas"]["LoRAMetadataField"][]; + loras: components['schemas']['LoRAMetadataField'][]; /** * Vae * @description The VAE used for decoding, if the main model's default was not used */ - vae?: components["schemas"]["VAEModelField"]; + vae?: components['schemas']['VAEModelField']; /** * Strength * @description The strength used for latents-to-latents @@ -2175,7 +2178,7 @@ export type components = { * Refiner Model * @description The SDXL Refiner model used */ - refiner_model?: components["schemas"]["MainModelField"]; + refiner_model?: components['schemas']['MainModelField']; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner @@ -2238,17 +2241,17 @@ export type components = { * Vae * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components['schemas']['VaeField']; /** * Image * @description Image which will be masked */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Mask * @description The mask to use when pasting */ - mask?: components["schemas"]["ImageField"]; + mask?: components['schemas']['ImageField']; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -2266,11 
+2269,12 @@ export type components = { * @default create_denoise_mask * @enum {string} */ - type: "create_denoise_mask"; + type: 'create_denoise_mask'; }; /** * CursorPaginatedResults[SessionQueueItemDTO] * @description Cursor-paginated results + * Generic must be a Pydantic model */ CursorPaginatedResults_SessionQueueItemDTO_: { /** @@ -2287,7 +2291,7 @@ export type components = { * Items * @description Items */ - items: components["schemas"]["SessionQueueItemDTO"][]; + items: components['schemas']['SessionQueueItemDTO'][]; }; /** * OpenCV Inpaint @@ -2320,18 +2324,18 @@ export type components = { * Image * @description The image to inpaint */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Mask * @description The mask to use when inpainting */ - mask?: components["schemas"]["ImageField"]; + mask?: components['schemas']['ImageField']; /** * Type * @default cv_inpaint * @enum {string} */ - type: "cv_inpaint"; + type: 'cv_inpaint'; }; /** DeleteBoardResult */ DeleteBoardResult: { @@ -2387,7 +2391,7 @@ export type components = { * Noise * @description Noise tensor */ - noise?: components["schemas"]["LatentsField"]; + noise?: components['schemas']['LatentsField']; /** * Steps * @description Number of steps to run @@ -2418,50 +2422,76 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; + scheduler?: + | 'ddim' + | 'ddpm' + | 'deis' + | 'lms' + | 'lms_k' + | 'pndm' + | 'heun' + | 'heun_k' + | 'euler' + | 'euler_k' + | 'euler_a' + | 'kdpm_2' + | 'kdpm_2_a' + | 'dpmpp_2s' + | 'dpmpp_2s_k' + | 'dpmpp_2m' + | 'dpmpp_2m_k' + | 'dpmpp_2m_sde' + | 'dpmpp_2m_sde_k' + | 'dpmpp_sde' + | 'dpmpp_sde_k' + | 'unipc'; /** Control */ - control?: 
components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; + control?: + | components['schemas']['ControlField'] + | components['schemas']['ControlField'][]; /** * IP-Adapter * @description IP-Adapter to apply */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][]; + ip_adapter?: components['schemas']['IPAdapterField']; /** * T2I-Adapter * @description T2I-Adapter(s) to apply */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][]; + t2i_adapter?: + | components['schemas']['T2IAdapterField'] + | components['schemas']['T2IAdapterField'][]; /** * Latents * @description Latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Denoise Mask * @description The mask to use for the operation */ - denoise_mask?: components["schemas"]["DenoiseMaskField"]; + denoise_mask?: components['schemas']['DenoiseMaskField']; /** * Type * @default denoise_latents * @enum {string} */ - type: "denoise_latents"; + type: 'denoise_latents'; /** * Positive Conditioning * @description Positive conditioning tensor */ - positive_conditioning?: components["schemas"]["ConditioningField"]; + positive_conditioning?: components['schemas']['ConditioningField']; /** * Negative Conditioning * @description Negative conditioning tensor */ - negative_conditioning?: components["schemas"]["ConditioningField"]; + negative_conditioning?: components['schemas']['ConditioningField']; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; }; /** * DenoiseMaskField @@ -2488,13 +2518,13 @@ export type components = { * Denoise Mask * @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; + denoise_mask: components['schemas']['DenoiseMaskField']; /** * Type * @default denoise_mask_output * @enum 
{string} */ - type: "denoise_mask_output"; + type: 'denoise_mask_output'; }; /** * Divide Integers @@ -2540,7 +2570,7 @@ export type components = { * @default div * @enum {string} */ - type: "div"; + type: 'div'; }; /** * Dynamic Prompt @@ -2591,7 +2621,7 @@ export type components = { * @default dynamic_prompt * @enum {string} */ - type: "dynamic_prompt"; + type: 'dynamic_prompt'; }; /** DynamicPromptsResponse */ DynamicPromptsResponse: { @@ -2631,14 +2661,18 @@ export type components = { * Image * @description The input image */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Model Name * @description The Real-ESRGAN model to use * @default RealESRGAN_x4plus.pth * @enum {string} */ - model_name?: "RealESRGAN_x4plus.pth" | "RealESRGAN_x4plus_anime_6B.pth" | "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth" | "RealESRGAN_x2plus.pth"; + model_name?: + | 'RealESRGAN_x4plus.pth' + | 'RealESRGAN_x4plus_anime_6B.pth' + | 'ESRGAN_SRx4_DF2KOST_official-ff704c30.pth' + | 'RealESRGAN_x2plus.pth'; /** * Tile Size * @description Tile size for tiled ESRGAN upscaling (0=tiling disabled) @@ -2650,7 +2684,7 @@ export type components = { * @default esrgan * @enum {string} */ - type: "esrgan"; + type: 'esrgan'; }; /** Edge */ Edge: { @@ -2658,12 +2692,12 @@ export type components = { * Source * @description The connection for the edge's from node and field */ - source: components["schemas"]["EdgeConnection"]; + source: components['schemas']['EdgeConnection']; /** * Destination * @description The connection for the edge's to node and field */ - destination: components["schemas"]["EdgeConnection"]; + destination: components['schemas']['EdgeConnection']; }; /** EdgeConnection */ EdgeConnection: { @@ -2699,7 +2733,7 @@ export type components = { * Batch * @description The batch that was enqueued */ - batch: components["schemas"]["Batch"]; + batch: components['schemas']['Batch']; /** * Priority * @description The priority of the enqueued 
batch @@ -2722,7 +2756,7 @@ export type components = { * Batch * @description The batch that was enqueued */ - batch: components["schemas"]["Batch"]; + batch: components['schemas']['Batch']; /** * Priority * @description The priority of the enqueued batch @@ -2732,7 +2766,7 @@ export type components = { * Queue Item * @description The queue item that was enqueued */ - queue_item: components["schemas"]["SessionQueueItemDTO"]; + queue_item: components['schemas']['SessionQueueItemDTO']; }; /** * FaceIdentifier @@ -2765,7 +2799,7 @@ export type components = { * Image * @description Image to face detect */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Minimum Confidence * @description Minimum confidence for face detection (lower if detection is failing) @@ -2783,7 +2817,7 @@ export type components = { * @default face_identifier * @enum {string} */ - type: "face_identifier"; + type: 'face_identifier'; }; /** * FaceMask @@ -2816,7 +2850,7 @@ export type components = { * Image * @description Image to face detect */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Face Ids * @description Comma-separated list of face ids to mask eg '0,2,7'. Numbered from 0. Leave empty to mask all. Find face IDs with FaceIdentifier node. 
@@ -2858,7 +2892,7 @@ export type components = { * @default face_mask_detection * @enum {string} */ - type: "face_mask_detection"; + type: 'face_mask_detection'; }; /** * FaceMaskOutput @@ -2869,7 +2903,7 @@ export type components = { * Image * @description The output image */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Width * @description The width of the image in pixels @@ -2885,12 +2919,12 @@ export type components = { * @default face_mask_output * @enum {string} */ - type: "face_mask_output"; + type: 'face_mask_output'; /** * Mask * @description The output mask */ - mask: components["schemas"]["ImageField"]; + mask: components['schemas']['ImageField']; }; /** * FaceOff @@ -2923,7 +2957,7 @@ export type components = { * Image * @description Image for face detection */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Face Id * @description The face ID to process, numbered from 0. Multiple faces not supported. Find a face's ID with FaceIdentifier node. 
@@ -2965,7 +2999,7 @@ export type components = { * @default face_off * @enum {string} */ - type: "face_off"; + type: 'face_off'; }; /** * FaceOffOutput @@ -2976,7 +3010,7 @@ export type components = { * Image * @description The output image */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Width * @description The width of the image in pixels @@ -2992,12 +3026,12 @@ export type components = { * @default face_off_output * @enum {string} */ - type: "face_off_output"; + type: 'face_off_output'; /** * Mask * @description The output mask */ - mask: components["schemas"]["ImageField"]; + mask: components['schemas']['ImageField']; /** * X * @description The x coordinate of the bounding box's left side @@ -3046,7 +3080,7 @@ export type components = { * @default float_collection * @enum {string} */ - type: "float_collection"; + type: 'float_collection'; }; /** * FloatCollectionOutput @@ -3063,7 +3097,7 @@ export type components = { * @default float_collection_output * @enum {string} */ - type: "float_collection_output"; + type: 'float_collection_output'; }; /** * Float Primitive @@ -3103,7 +3137,7 @@ export type components = { * @default float * @enum {string} */ - type: "float"; + type: 'float'; }; /** * Float Range @@ -3155,7 +3189,7 @@ export type components = { * @default float_range * @enum {string} */ - type: "float_range"; + type: 'float_range'; }; /** * Float Math @@ -3190,7 +3224,16 @@ export type components = { * @default ADD * @enum {string} */ - operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "ABS" | "SQRT" | "MIN" | "MAX"; + operation?: + | 'ADD' + | 'SUB' + | 'MUL' + | 'DIV' + | 'EXP' + | 'ABS' + | 'SQRT' + | 'MIN' + | 'MAX'; /** * A * @description The first number @@ -3208,7 +3251,7 @@ export type components = { * @default float_math * @enum {string} */ - type: "float_math"; + type: 'float_math'; }; /** * FloatOutput @@ -3225,7 +3268,7 @@ export type components = { * @default float_output * @enum {string} 
*/ - type: "float_output"; + type: 'float_output'; }; /** * Float To Integer @@ -3272,13 +3315,13 @@ export type components = { * @default Nearest * @enum {string} */ - method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; + method?: 'Nearest' | 'Floor' | 'Ceiling' | 'Truncate'; /** * Type * @default float_to_int * @enum {string} */ - type: "float_to_int"; + type: 'float_to_int'; }; /** Graph */ Graph: { @@ -3292,13 +3335,125 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | 
components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | 
components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | 
components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + [key: string]: + | components['schemas']['BooleanInvocation'] + | components['schemas']['BooleanCollectionInvocation'] + | components['schemas']['IntegerInvocation'] + | components['schemas']['IntegerCollectionInvocation'] + | components['schemas']['FloatInvocation'] + | components['schemas']['FloatCollectionInvocation'] + | components['schemas']['StringInvocation'] + | components['schemas']['StringCollectionInvocation'] + | components['schemas']['ImageInvocation'] + | components['schemas']['ImageCollectionInvocation'] + | components['schemas']['LatentsInvocation'] + | components['schemas']['LatentsCollectionInvocation'] + | components['schemas']['ColorInvocation'] + | components['schemas']['ConditioningInvocation'] + | components['schemas']['ConditioningCollectionInvocation'] + | components['schemas']['ControlNetInvocation'] + | components['schemas']['ImageProcessorInvocation'] + | components['schemas']['MainModelLoaderInvocation'] + | components['schemas']['LoraLoaderInvocation'] + | components['schemas']['SDXLLoraLoaderInvocation'] + | components['schemas']['VaeLoaderInvocation'] + | components['schemas']['SeamlessModeInvocation'] + | components['schemas']['SDXLModelLoaderInvocation'] + | components['schemas']['SDXLRefinerModelLoaderInvocation'] + | components['schemas']['MetadataAccumulatorInvocation'] + | components['schemas']['IPAdapterInvocation'] + | 
components['schemas']['CompelInvocation'] + | components['schemas']['SDXLCompelPromptInvocation'] + | components['schemas']['SDXLRefinerCompelPromptInvocation'] + | components['schemas']['ClipSkipInvocation'] + | components['schemas']['SchedulerInvocation'] + | components['schemas']['CreateDenoiseMaskInvocation'] + | components['schemas']['DenoiseLatentsInvocation'] + | components['schemas']['LatentsToImageInvocation'] + | components['schemas']['ResizeLatentsInvocation'] + | components['schemas']['ScaleLatentsInvocation'] + | components['schemas']['ImageToLatentsInvocation'] + | components['schemas']['BlendLatentsInvocation'] + | components['schemas']['ONNXPromptInvocation'] + | components['schemas']['ONNXTextToLatentsInvocation'] + | components['schemas']['ONNXLatentsToImageInvocation'] + | components['schemas']['OnnxModelLoaderInvocation'] + | components['schemas']['ShowImageInvocation'] + | components['schemas']['BlankImageInvocation'] + | components['schemas']['ImageCropInvocation'] + | components['schemas']['ImagePasteInvocation'] + | components['schemas']['MaskFromAlphaInvocation'] + | components['schemas']['ImageMultiplyInvocation'] + | components['schemas']['ImageChannelInvocation'] + | components['schemas']['ImageConvertInvocation'] + | components['schemas']['ImageBlurInvocation'] + | components['schemas']['ImageResizeInvocation'] + | components['schemas']['ImageScaleInvocation'] + | components['schemas']['ImageLerpInvocation'] + | components['schemas']['ImageInverseLerpInvocation'] + | components['schemas']['ImageNSFWBlurInvocation'] + | components['schemas']['ImageWatermarkInvocation'] + | components['schemas']['MaskEdgeInvocation'] + | components['schemas']['MaskCombineInvocation'] + | components['schemas']['ColorCorrectInvocation'] + | components['schemas']['ImageHueAdjustmentInvocation'] + | components['schemas']['ImageChannelOffsetInvocation'] + | components['schemas']['ImageChannelMultiplyInvocation'] + | components['schemas']['SaveImageInvocation'] 
+ | components['schemas']['DynamicPromptInvocation'] + | components['schemas']['PromptsFromFileInvocation'] + | components['schemas']['CvInpaintInvocation'] + | components['schemas']['FloatLinearRangeInvocation'] + | components['schemas']['StepParamEasingInvocation'] + | components['schemas']['AddInvocation'] + | components['schemas']['SubtractInvocation'] + | components['schemas']['MultiplyInvocation'] + | components['schemas']['DivideInvocation'] + | components['schemas']['RandomIntInvocation'] + | components['schemas']['FloatToIntegerInvocation'] + | components['schemas']['RoundInvocation'] + | components['schemas']['IntegerMathInvocation'] + | components['schemas']['FloatMathInvocation'] + | components['schemas']['NoiseInvocation'] + | components['schemas']['RangeInvocation'] + | components['schemas']['RangeOfSizeInvocation'] + | components['schemas']['RandomRangeInvocation'] + | components['schemas']['ESRGANInvocation'] + | components['schemas']['StringSplitNegInvocation'] + | components['schemas']['StringSplitInvocation'] + | components['schemas']['StringJoinInvocation'] + | components['schemas']['StringJoinThreeInvocation'] + | components['schemas']['StringReplaceInvocation'] + | components['schemas']['InfillColorInvocation'] + | components['schemas']['InfillTileInvocation'] + | components['schemas']['InfillPatchMatchInvocation'] + | components['schemas']['LaMaInfillInvocation'] + | components['schemas']['CV2InfillInvocation'] + | components['schemas']['GraphInvocation'] + | components['schemas']['IterateInvocation'] + | components['schemas']['CollectInvocation'] + | components['schemas']['CannyImageProcessorInvocation'] + | components['schemas']['HedImageProcessorInvocation'] + | components['schemas']['LineartImageProcessorInvocation'] + | components['schemas']['LineartAnimeImageProcessorInvocation'] + | components['schemas']['OpenposeImageProcessorInvocation'] + | components['schemas']['MidasDepthImageProcessorInvocation'] + | 
components['schemas']['NormalbaeImageProcessorInvocation'] + | components['schemas']['MlsdImageProcessorInvocation'] + | components['schemas']['PidiImageProcessorInvocation'] + | components['schemas']['ContentShuffleImageProcessorInvocation'] + | components['schemas']['ZoeDepthImageProcessorInvocation'] + | components['schemas']['MediapipeFaceProcessorInvocation'] + | components['schemas']['LeresImageProcessorInvocation'] + | components['schemas']['TileResamplerProcessorInvocation'] + | components['schemas']['SegmentAnythingProcessorInvocation'] + | components['schemas']['ColorMapImageProcessorInvocation']; }; /** * Edges * @description The connections between nodes and their fields in this graph */ - edges?: components["schemas"]["Edge"][]; + edges?: components['schemas']['Edge'][]; }; /** * GraphExecutionState @@ -3314,12 +3469,12 @@ export type components = { * Graph * @description The graph being executed */ - graph: components["schemas"]["Graph"]; + graph: components['schemas']['Graph']; /** * Execution Graph * @description The expanded graph of activated and executed nodes */ - execution_graph: components["schemas"]["Graph"]; + execution_graph: components['schemas']['Graph']; /** * Executed * @description The set of node ids that have been executed @@ -3335,7 +3490,43 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | 
components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; + [key: string]: + | components['schemas']['BooleanOutput'] + | components['schemas']['BooleanCollectionOutput'] + | components['schemas']['IntegerOutput'] + | components['schemas']['IntegerCollectionOutput'] + | components['schemas']['FloatOutput'] + | components['schemas']['FloatCollectionOutput'] + | components['schemas']['StringOutput'] + | components['schemas']['StringCollectionOutput'] + | components['schemas']['ImageOutput'] + | components['schemas']['ImageCollectionOutput'] + | components['schemas']['DenoiseMaskOutput'] + | components['schemas']['LatentsOutput'] + | components['schemas']['LatentsCollectionOutput'] + | components['schemas']['ColorOutput'] + | components['schemas']['ColorCollectionOutput'] + | components['schemas']['ConditioningOutput'] + | 
components['schemas']['ConditioningCollectionOutput'] + | components['schemas']['ControlOutput'] + | components['schemas']['ModelLoaderOutput'] + | components['schemas']['LoraLoaderOutput'] + | components['schemas']['SDXLLoraLoaderOutput'] + | components['schemas']['VaeLoaderOutput'] + | components['schemas']['SeamlessModeOutput'] + | components['schemas']['SDXLModelLoaderOutput'] + | components['schemas']['SDXLRefinerModelLoaderOutput'] + | components['schemas']['MetadataAccumulatorOutput'] + | components['schemas']['IPAdapterOutput'] + | components['schemas']['ClipSkipInvocationOutput'] + | components['schemas']['SchedulerOutput'] + | components['schemas']['ONNXModelLoaderOutput'] + | components['schemas']['NoiseOutput'] + | components['schemas']['StringPosNegOutput'] + | components['schemas']['String2Output'] + | components['schemas']['GraphInvocationOutput'] + | components['schemas']['IterateInvocationOutput'] + | components['schemas']['CollectInvocationOutput']; }; /** * Errors @@ -3390,13 +3581,13 @@ export type components = { * Graph * @description The graph to run */ - graph?: components["schemas"]["Graph"]; + graph?: components['schemas']['Graph']; /** * Type * @default graph * @enum {string} */ - type: "graph"; + type: 'graph'; }; /** * GraphInvocationOutput @@ -3410,12 +3601,12 @@ export type components = { * @default graph_output * @enum {string} */ - type: "graph_output"; + type: 'graph_output'; }; /** HTTPValidationError */ HTTPValidationError: { /** Detail */ - detail?: components["schemas"]["ValidationError"][]; + detail?: components['schemas']['ValidationError'][]; }; /** * HED (softedge) Processor @@ -3448,13 +3639,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default hed_image_processor * @enum {string} */ - type: "hed_image_processor"; + type: 'hed_image_processor'; /** * Detect Resolution * @description 
Pixel resolution for detection @@ -3480,17 +3671,17 @@ export type components = { * Image * @description The IP-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Ip Adapter Model * @description The IP-Adapter model to use. */ - ip_adapter_model: components["schemas"]["IPAdapterModelField"]; + ip_adapter_model: components['schemas']['IPAdapterModelField']; /** * Image Encoder Model * @description The name of the CLIP image encoder model. */ - image_encoder_model: components["schemas"]["CLIPVisionModelField"]; + image_encoder_model: components['schemas']['CLIPVisionModelField']; /** * Weight * @description The weight given to the ControlNet @@ -3541,12 +3732,12 @@ export type components = { * Image * @description The IP-Adapter image prompt. */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * IP-Adapter Model * @description The IP-Adapter model. */ - ip_adapter_model: components["schemas"]["IPAdapterModelField"]; + ip_adapter_model: components['schemas']['IPAdapterModelField']; /** * Weight * @description The weight given to the IP-Adapter @@ -3570,7 +3761,7 @@ export type components = { * @default ip_adapter * @enum {string} */ - type: "ip_adapter"; + type: 'ip_adapter'; }; /** IPAdapterMetadataField */ IPAdapterMetadataField: { @@ -3578,12 +3769,12 @@ export type components = { * Image * @description The IP-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Ip Adapter Model * @description The IP-Adapter model to use. 
*/ - ip_adapter_model: components["schemas"]["IPAdapterModelField"]; + ip_adapter_model: components['schemas']['IPAdapterModelField']; /** * Weight * @description The weight of the IP-Adapter model @@ -3610,18 +3801,18 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** IPAdapterModelInvokeAIConfig */ IPAdapterModelInvokeAIConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "ip_adapter"; + model_type: 'ip_adapter'; /** Path */ path: string; /** Description */ @@ -3630,8 +3821,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: "invokeai"; - error?: components["schemas"]["ModelError"]; + model_format: 'invokeai'; + error?: components['schemas']['ModelError']; }; /** * IPAdapterOutput @@ -3644,13 +3835,13 @@ export type components = { * IP-Adapter * @description IP-Adapter to apply */ - ip_adapter: components["schemas"]["IPAdapterField"]; + ip_adapter: components['schemas']['IPAdapterField']; /** * Type * @default ip_adapter_output * @enum {string} */ - type: "ip_adapter_output"; + type: 'ip_adapter_output'; }; /** * Blur Image @@ -3683,7 +3874,7 @@ export type components = { * Image * @description The image to blur */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Radius * @description The blur radius @@ -3696,13 +3887,13 @@ export type components = { * @default gaussian * @enum {string} */ - blur_type?: "gaussian" | "box"; + blur_type?: 'gaussian' | 'box'; /** * Type * @default img_blur * @enum {string} */ - type: "img_blur"; + type: 'img_blur'; }; /** * ImageCategory @@ -3715,7 +3906,7 @@ export type components = { * - OTHER: The image is some other type of image with a specialized 
purpose. To be used by external nodes. * @enum {string} */ - ImageCategory: "general" | "mask" | "control" | "user" | "other"; + ImageCategory: 'general' | 'mask' | 'control' | 'user' | 'other'; /** * Extract Image Channel * @description Gets a channel from an image. @@ -3747,20 +3938,20 @@ export type components = { * Image * @description The image to get the channel from */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Channel * @description The channel to get * @default A * @enum {string} */ - channel?: "A" | "R" | "G" | "B"; + channel?: 'A' | 'R' | 'G' | 'B'; /** * Type * @default img_chan * @enum {string} */ - type: "img_chan"; + type: 'img_chan'; }; /** * Multiply Image Channel @@ -3793,13 +3984,30 @@ export type components = { * Image * @description The image to adjust */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Channel * @description Which channel to adjust * @enum {string} */ - channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; + channel?: + | 'Red (RGBA)' + | 'Green (RGBA)' + | 'Blue (RGBA)' + | 'Alpha (RGBA)' + | 'Cyan (CMYK)' + | 'Magenta (CMYK)' + | 'Yellow (CMYK)' + | 'Black (CMYK)' + | 'Hue (HSV)' + | 'Saturation (HSV)' + | 'Value (HSV)' + | 'Luminosity (LAB)' + | 'A (LAB)' + | 'B (LAB)' + | 'Y (YCbCr)' + | 'Cb (YCbCr)' + | 'Cr (YCbCr)'; /** * Scale * @description The amount to scale the channel by. 
@@ -3817,7 +4025,7 @@ export type components = { * @default img_channel_multiply * @enum {string} */ - type: "img_channel_multiply"; + type: 'img_channel_multiply'; }; /** * Offset Image Channel @@ -3850,13 +4058,30 @@ export type components = { * Image * @description The image to adjust */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Channel * @description Which channel to adjust * @enum {string} */ - channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; + channel?: + | 'Red (RGBA)' + | 'Green (RGBA)' + | 'Blue (RGBA)' + | 'Alpha (RGBA)' + | 'Cyan (CMYK)' + | 'Magenta (CMYK)' + | 'Yellow (CMYK)' + | 'Black (CMYK)' + | 'Hue (HSV)' + | 'Saturation (HSV)' + | 'Value (HSV)' + | 'Luminosity (LAB)' + | 'A (LAB)' + | 'B (LAB)' + | 'Y (YCbCr)' + | 'Cb (YCbCr)' + | 'Cr (YCbCr)'; /** * Offset * @description The amount to adjust the channel by @@ -3868,7 +4093,7 @@ export type components = { * @default img_channel_offset * @enum {string} */ - type: "img_channel_offset"; + type: 'img_channel_offset'; }; /** * Image Collection Primitive @@ -3901,13 +4126,13 @@ export type components = { * Collection * @description The collection of image values */ - collection?: components["schemas"]["ImageField"][]; + collection?: components['schemas']['ImageField'][]; /** * Type * @default image_collection * @enum {string} */ - type: "image_collection"; + type: 'image_collection'; }; /** * ImageCollectionOutput @@ -3918,13 +4143,13 @@ export type components = { * Collection * @description The output images */ - collection: components["schemas"]["ImageField"][]; + collection: components['schemas']['ImageField'][]; /** * Type * @default image_collection_output * @enum {string} */ - type: "image_collection_output"; 
+ type: 'image_collection_output'; }; /** * Convert Image Mode @@ -3957,20 +4182,29 @@ export type components = { * Image * @description The image to convert */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Mode * @description The mode to convert to * @default L * @enum {string} */ - mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; + mode?: + | 'L' + | 'RGB' + | 'RGBA' + | 'CMYK' + | 'YCbCr' + | 'LAB' + | 'HSV' + | 'I' + | 'F'; /** * Type * @default img_conv * @enum {string} */ - type: "img_conv"; + type: 'img_conv'; }; /** * Crop Image @@ -4003,7 +4237,7 @@ export type components = { * Image * @description The image to crop */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * X * @description The left x coordinate of the crop rectangle @@ -4033,7 +4267,7 @@ export type components = { * @default img_crop * @enum {string} */ - type: "img_crop"; + type: 'img_crop'; }; /** * ImageDTO @@ -4056,9 +4290,9 @@ export type components = { */ thumbnail_url: string; /** @description The type of the image. */ - image_origin: components["schemas"]["ResourceOrigin"]; + image_origin: components['schemas']['ResourceOrigin']; /** @description The category of the image. */ - image_category: components["schemas"]["ImageCategory"]; + image_category: components['schemas']['ImageCategory']; /** * Width * @description The width of the image in px. 
@@ -4152,7 +4386,7 @@ export type components = { * Image * @description The image to adjust */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Hue * @description The degrees by which to rotate the hue, 0-360 @@ -4164,7 +4398,7 @@ export type components = { * @default img_hue_adjust * @enum {string} */ - type: "img_hue_adjust"; + type: 'img_hue_adjust'; }; /** * Inverse Lerp Image @@ -4197,7 +4431,7 @@ export type components = { * Image * @description The image to lerp */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Min * @description The minimum input value @@ -4215,7 +4449,7 @@ export type components = { * @default img_ilerp * @enum {string} */ - type: "img_ilerp"; + type: 'img_ilerp'; }; /** * Image Primitive @@ -4248,13 +4482,13 @@ export type components = { * Image * @description The image to load */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default image * @enum {string} */ - type: "image"; + type: 'image'; }; /** * Lerp Image @@ -4287,7 +4521,7 @@ export type components = { * Image * @description The image to lerp */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Min * @description The minimum output value @@ -4305,7 +4539,7 @@ export type components = { * @default img_lerp * @enum {string} */ - type: "img_lerp"; + type: 'img_lerp'; }; /** * ImageMetadata @@ -4354,18 +4588,18 @@ export type components = { * Image1 * @description The first image to multiply */ - image1?: components["schemas"]["ImageField"]; + image1?: components['schemas']['ImageField']; /** * Image2 * @description The second image to multiply */ - image2?: components["schemas"]["ImageField"]; + image2?: components['schemas']['ImageField']; /** * Type * @default img_mul * @enum {string} */ - type: "img_mul"; + type: 'img_mul'; }; /** * Blur NSFW Image @@ -4398,18 +4632,18 
@@ export type components = { * Metadata * @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default img_nsfw * @enum {string} */ - type: "img_nsfw"; + type: 'img_nsfw'; /** * Image * @description The image to check */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; }; /** * ImageOutput @@ -4420,7 +4654,7 @@ export type components = { * Image * @description The output image */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * Width * @description The width of the image in pixels @@ -4436,7 +4670,7 @@ export type components = { * @default image_output * @enum {string} */ - type: "image_output"; + type: 'image_output'; }; /** * Paste Image @@ -4469,17 +4703,17 @@ export type components = { * Base Image * @description The base image */ - base_image?: components["schemas"]["ImageField"]; + base_image?: components['schemas']['ImageField']; /** * Image * @description The image to paste */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Mask * @description The mask to use when pasting */ - mask?: components["schemas"]["ImageField"]; + mask?: components['schemas']['ImageField']; /** * X * @description The left x coordinate at which to paste the image @@ -4503,7 +4737,7 @@ export type components = { * @default img_paste * @enum {string} */ - type: "img_paste"; + type: 'img_paste'; }; /** * Base Image Processor @@ -4536,13 +4770,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default image_processor * @enum {string} */ - type: "image_processor"; + type: 'image_processor'; }; /** * ImageRecordChanges @@ -4556,7 +4790,7 @@ export type components = { */ 
ImageRecordChanges: { /** @description The image's new category. */ - image_category?: components["schemas"]["ImageCategory"]; + image_category?: components['schemas']['ImageCategory']; /** * Session Id * @description The image's new session ID. @@ -4604,7 +4838,7 @@ export type components = { * Image * @description The image to resize */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Width * @description The width to resize to (px) @@ -4623,18 +4857,24 @@ export type components = { * @default bicubic * @enum {string} */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + resample_mode?: + | 'nearest' + | 'box' + | 'bilinear' + | 'hamming' + | 'bicubic' + | 'lanczos'; /** * Metadata * @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default img_resize * @enum {string} */ - type: "img_resize"; + type: 'img_resize'; }; /** * Scale Image @@ -4667,7 +4907,7 @@ export type components = { * Image * @description The image to scale */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Scale Factor * @description The factor by which to scale the image @@ -4680,13 +4920,19 @@ export type components = { * @default bicubic * @enum {string} */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + resample_mode?: + | 'nearest' + | 'box' + | 'bilinear' + | 'hamming' + | 'bicubic' + | 'lanczos'; /** * Type * @default img_scale * @enum {string} */ - type: "img_scale"; + type: 'img_scale'; }; /** * Image to Latents @@ -4719,12 +4965,12 @@ export type components = { * Image * @description The image to encode */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Vae * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: 
components['schemas']['VaeField']; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -4742,7 +4988,7 @@ export type components = { * @default i2l * @enum {string} */ - type: "i2l"; + type: 'i2l'; }; /** * ImageUrlsDTO @@ -4796,7 +5042,7 @@ export type components = { * Image * @description The image to check */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Text * @description Watermark text @@ -4807,13 +5053,13 @@ export type components = { * Metadata * @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default img_watermark * @enum {string} */ - type: "img_watermark"; + type: 'img_watermark'; }; /** ImagesDownloaded */ ImagesDownloaded: { @@ -4862,7 +5108,7 @@ export type components = { * Image * @description The image to infill */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Color * @description The color to use to infill @@ -4873,13 +5119,13 @@ export type components = { * "a": 255 * } */ - color?: components["schemas"]["ColorField"]; + color?: components['schemas']['ColorField']; /** * Type * @default infill_rgba * @enum {string} */ - type: "infill_rgba"; + type: 'infill_rgba'; }; /** * PatchMatch Infill @@ -4912,7 +5158,7 @@ export type components = { * Image * @description The image to infill */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Downscale * @description Run patchmatch on downscaled image to speedup infill @@ -4925,13 +5171,19 @@ export type components = { * @default bicubic * @enum {string} */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + resample_mode?: + | 'nearest' + | 'box' + | 'bilinear' + | 'hamming' + | 'bicubic' + | 'lanczos'; /** * Type * @default infill_patchmatch * 
@enum {string} */ - type: "infill_patchmatch"; + type: 'infill_patchmatch'; }; /** * Tile Infill @@ -4964,7 +5216,7 @@ export type components = { * Image * @description The image to infill */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Tile Size * @description The tile size (px) @@ -4981,7 +5233,7 @@ export type components = { * @default infill_tile * @enum {string} */ - type: "infill_tile"; + type: 'infill_tile'; }; /** * Integer Collection Primitive @@ -5020,7 +5272,7 @@ export type components = { * @default integer_collection * @enum {string} */ - type: "integer_collection"; + type: 'integer_collection'; }; /** * IntegerCollectionOutput @@ -5037,7 +5289,7 @@ export type components = { * @default integer_collection_output * @enum {string} */ - type: "integer_collection_output"; + type: 'integer_collection_output'; }; /** * Integer Primitive @@ -5077,7 +5329,7 @@ export type components = { * @default integer * @enum {string} */ - type: "integer"; + type: 'integer'; }; /** * Integer Math @@ -5112,7 +5364,16 @@ export type components = { * @default ADD * @enum {string} */ - operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "MOD" | "ABS" | "MIN" | "MAX"; + operation?: + | 'ADD' + | 'SUB' + | 'MUL' + | 'DIV' + | 'EXP' + | 'MOD' + | 'ABS' + | 'MIN' + | 'MAX'; /** * A * @description The first number @@ -5130,7 +5391,7 @@ export type components = { * @default integer_math * @enum {string} */ - type: "integer_math"; + type: 'integer_math'; }; /** * IntegerOutput @@ -5147,7 +5408,7 @@ export type components = { * @default integer_output * @enum {string} */ - type: "integer_output"; + type: 'integer_output'; }; /** InvocationCacheStatus */ InvocationCacheStatus: { @@ -5220,7 +5481,7 @@ export type components = { * @default iterate * @enum {string} */ - type: "iterate"; + type: 'iterate'; }; /** * IterateInvocationOutput @@ -5237,7 +5498,7 @@ export type components = { * @default iterate_output * @enum {string} */ - 
type: "iterate_output"; + type: 'iterate_output'; }; /** * LaMa Infill @@ -5270,13 +5531,13 @@ export type components = { * Image * @description The image to infill */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default infill_lama * @enum {string} */ - type: "infill_lama"; + type: 'infill_lama'; }; /** * Latents Collection Primitive @@ -5309,13 +5570,13 @@ export type components = { * Collection * @description The collection of latents tensors */ - collection?: components["schemas"]["LatentsField"][]; + collection?: components['schemas']['LatentsField'][]; /** * Type * @default latents_collection * @enum {string} */ - type: "latents_collection"; + type: 'latents_collection'; }; /** * LatentsCollectionOutput @@ -5326,13 +5587,13 @@ export type components = { * Collection * @description Latents tensor */ - collection: components["schemas"]["LatentsField"][]; + collection: components['schemas']['LatentsField'][]; /** * Type * @default latents_collection_output * @enum {string} */ - type: "latents_collection_output"; + type: 'latents_collection_output'; }; /** * LatentsField @@ -5381,13 +5642,13 @@ export type components = { * Latents * @description The latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Type * @default latents * @enum {string} */ - type: "latents"; + type: 'latents'; }; /** * LatentsOutput @@ -5398,7 +5659,7 @@ export type components = { * Latents * @description Latents tensor */ - latents: components["schemas"]["LatentsField"]; + latents: components['schemas']['LatentsField']; /** * Width * @description Width of output (px) @@ -5414,7 +5675,7 @@ export type components = { * @default latents_output * @enum {string} */ - type: "latents_output"; + type: 'latents_output'; }; /** * Latents to Image @@ -5459,23 +5720,23 @@ export type components = { * Metadata * @description Optional core metadata to be written to image 
*/ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default l2i * @enum {string} */ - type: "l2i"; + type: 'l2i'; /** * Latents * @description Latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Vae * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components['schemas']['VaeField']; }; /** * Leres (Depth) Processor @@ -5508,13 +5769,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default leres_image_processor * @enum {string} */ - type: "leres_image_processor"; + type: 'leres_image_processor'; /** * Thr A * @description Leres parameter `thr_a` @@ -5577,13 +5838,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default lineart_anime_image_processor * @enum {string} */ - type: "lineart_anime_image_processor"; + type: 'lineart_anime_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -5628,13 +5889,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default lineart_image_processor * @enum {string} */ - type: "lineart_image_processor"; + type: 'lineart_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -5663,7 +5924,7 @@ export type components = { * Lora * @description The LoRA model */ - lora: components["schemas"]["LoRAModelField"]; + lora: components['schemas']['LoRAModelField']; /** * Weight * @description The weight of the LoRA model @@ -5674,18 +5935,18 @@ export type components = { LoRAModelConfig: { /** Model Name 
*/ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "lora"; + model_type: 'lora'; /** Path */ path: string; /** Description */ description?: string; - model_format: components["schemas"]["LoRAModelFormat"]; - error?: components["schemas"]["ModelError"]; + model_format: components['schemas']['LoRAModelFormat']; + error?: components['schemas']['ModelError']; }; /** * LoRAModelField @@ -5698,14 +5959,14 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** * LoRAModelFormat * @description An enumeration. * @enum {string} */ - LoRAModelFormat: "lycoris" | "diffusers"; + LoRAModelFormat: 'lycoris' | 'diffusers'; /** * LogLevel * @description An enumeration. @@ -5720,11 +5981,11 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description Info to load submodel */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components['schemas']['SubModelType']; /** * Weight * @description Lora's weight which to use when apply to model @@ -5762,7 +6023,7 @@ export type components = { * LoRA * @description LoRA model to load */ - lora: components["schemas"]["LoRAModelField"]; + lora: components['schemas']['LoRAModelField']; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -5773,18 +6034,18 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * CLIP * 
@description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * Type * @default lora_loader * @enum {string} */ - type: "lora_loader"; + type: 'lora_loader'; }; /** * LoraLoaderOutput @@ -5795,18 +6056,18 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * Type * @default lora_loader_output * @enum {string} */ - type: "lora_loader_output"; + type: 'lora_loader_output'; }; /** * MainModelField @@ -5819,9 +6080,9 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description Model Type */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; }; /** * Main Model @@ -5854,13 +6115,13 @@ export type components = { * Model * @description Main model (UNet, VAE, CLIP) to load */ - model: components["schemas"]["MainModelField"]; + model: components['schemas']['MainModelField']; /** * Type * @default main_model_loader * @enum {string} */ - type: "main_model_loader"; + type: 'main_model_loader'; }; /** * Combine Masks @@ -5893,18 +6154,18 @@ export type components = { * Mask1 * @description The first mask to combine */ - mask1?: components["schemas"]["ImageField"]; + mask1?: components['schemas']['ImageField']; /** * Mask2 * @description The second image to combine */ - mask2?: components["schemas"]["ImageField"]; + mask2?: components['schemas']['ImageField']; /** * Type * @default mask_combine * @enum {string} */ - type: "mask_combine"; + type: 'mask_combine'; }; /** * Mask Edge 
@@ -5937,7 +6198,7 @@ export type components = { * Image * @description The image to apply the mask to */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Edge Size * @description The size of the edge @@ -5963,7 +6224,7 @@ export type components = { * @default mask_edge * @enum {string} */ - type: "mask_edge"; + type: 'mask_edge'; }; /** * Mask from Alpha @@ -5996,7 +6257,7 @@ export type components = { * Image * @description The image to create the mask from */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Invert * @description Whether or not to invert the mask @@ -6008,7 +6269,7 @@ export type components = { * @default tomask * @enum {string} */ - type: "tomask"; + type: 'tomask'; }; /** * Mediapipe Face Processor @@ -6041,13 +6302,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default mediapipe_face_processor * @enum {string} */ - type: "mediapipe_face_processor"; + type: 'mediapipe_face_processor'; /** * Max Faces * @description Maximum number of faces to detect @@ -6066,7 +6327,11 @@ export type components = { * @description An enumeration. 
* @enum {string} */ - MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; + MergeInterpolationMethod: + | 'weighted_sum' + | 'sigmoid' + | 'inv_sigmoid' + | 'add_difference'; /** * Metadata Accumulator * @description Outputs a Core Metadata Object @@ -6153,27 +6418,27 @@ export type components = { * Model * @description The main model used for inference */ - model?: components["schemas"]["MainModelField"]; + model?: components['schemas']['MainModelField']; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components['schemas']['ControlField'][]; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components['schemas']['IPAdapterMetadataField'][]; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters: components["schemas"]["T2IAdapterField"][]; + t2iAdapters: components['schemas']['T2IAdapterField'][]; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; + loras?: components['schemas']['LoRAMetadataField'][]; /** * Strength * @description The strength used for latents-to-latents @@ -6188,7 +6453,7 @@ export type components = { * Vae * @description The VAE used for decoding, if the main model's default was not used */ - vae?: components["schemas"]["VAEModelField"]; + vae?: components['schemas']['VAEModelField']; /** * Positive Style Prompt * @description The positive style prompt parameter @@ -6203,7 +6468,7 @@ export type components = { * Refiner Model * @description The SDXL Refiner model used */ - refiner_model?: components["schemas"]["MainModelField"]; + refiner_model?: components['schemas']['MainModelField']; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner @@ -6239,7 +6504,7 @@ export type 
components = { * @default metadata_accumulator * @enum {string} */ - type: "metadata_accumulator"; + type: 'metadata_accumulator'; }; /** * MetadataAccumulatorOutput @@ -6250,13 +6515,13 @@ export type components = { * Metadata * @description The core metadata for the image */ - metadata: components["schemas"]["CoreMetadata"]; + metadata: components['schemas']['CoreMetadata']; /** * Type * @default metadata_accumulator_output * @enum {string} */ - type: "metadata_accumulator_output"; + type: 'metadata_accumulator_output'; }; /** * Midas Depth Processor @@ -6289,13 +6554,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default midas_depth_image_processor * @enum {string} */ - type: "midas_depth_image_processor"; + type: 'midas_depth_image_processor'; /** * A Mult * @description Midas parameter `a_mult` (a = a_mult * PI) @@ -6340,13 +6605,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default mlsd_image_processor * @enum {string} */ - type: "mlsd_image_processor"; + type: 'mlsd_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -6377,7 +6642,7 @@ export type components = { * @description An enumeration. 
* @enum {string} */ - ModelError: "not_found"; + ModelError: 'not_found'; /** ModelInfo */ ModelInfo: { /** @@ -6386,11 +6651,11 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description Info to load submodel */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components['schemas']['SubModelType']; }; /** * ModelLoaderOutput @@ -6401,40 +6666,66 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet: components["schemas"]["UNetField"]; + unet: components['schemas']['UNetField']; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip: components["schemas"]["ClipField"]; + clip: components['schemas']['ClipField']; /** * VAE * @description VAE */ - vae: components["schemas"]["VaeField"]; + vae: components['schemas']['VaeField']; /** * Type * @default model_loader_output * @enum {string} */ - type: "model_loader_output"; + type: 'model_loader_output'; }; /** * ModelType * @description An enumeration. * @enum {string} */ - ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter"; + ModelType: + | 'onnx' + | 'main' + | 'vae' + | 'lora' + | 'controlnet' + | 'embedding' + | 'ip_adapter' + | 'clip_vision' + | 't2i_adapter'; /** * ModelVariantType * @description An enumeration. 
* @enum {string} */ - ModelVariantType: "normal" | "inpaint" | "depth"; + ModelVariantType: 'normal' | 'inpaint' | 'depth'; /** ModelsList */ ModelsList: { /** Models */ - models: (components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[]; + models: ( + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | 
components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig'] + )[]; }; /** * Multiply Integers @@ -6480,7 +6771,7 @@ export type components = { * @default mul * @enum {string} */ - type: "mul"; + type: 'mul'; }; /** NodeFieldValue */ NodeFieldValue: { @@ -6555,7 +6846,7 @@ export type components = { * @default noise * @enum {string} */ - type: "noise"; + type: 'noise'; }; /** * NoiseOutput @@ -6566,7 +6857,7 @@ export type components = { * Noise * @description Noise tensor */ - noise?: components["schemas"]["LatentsField"]; + noise?: components['schemas']['LatentsField']; /** * Width * @description Width of output (px) @@ -6582,7 +6873,7 @@ export type components = { * @default noise_output * @enum {string} */ - type: "noise_output"; + type: 'noise_output'; }; /** * Normal BAE Processor @@ -6615,13 +6906,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default normalbae_image_processor * @enum {string} */ - type: "normalbae_image_processor"; + type: 'normalbae_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -6666,23 +6957,23 @@ export type components = { * Latents * @description Denoised latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Vae * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components['schemas']['VaeField']; /** * Metadata * @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default l2i_onnx * @enum {string} */ - type: "l2i_onnx"; + type: 'l2i_onnx'; }; /** * ONNXModelLoaderOutput @@ -6693,28 +6984,28 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ 
- unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * VAE Decoder * @description VAE */ - vae_decoder?: components["schemas"]["VaeField"]; + vae_decoder?: components['schemas']['VaeField']; /** * VAE Encoder * @description VAE */ - vae_encoder?: components["schemas"]["VaeField"]; + vae_encoder?: components['schemas']['VaeField']; /** * Type * @default model_loader_output_onnx * @enum {string} */ - type: "model_loader_output_onnx"; + type: 'model_loader_output_onnx'; }; /** * ONNX Prompt (Raw) @@ -6756,24 +7047,24 @@ export type components = { * Clip * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * Type * @default prompt_onnx * @enum {string} */ - type: "prompt_onnx"; + type: 'prompt_onnx'; }; /** ONNXStableDiffusion1ModelConfig */ ONNXStableDiffusion1ModelConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "onnx"; + model_type: 'onnx'; /** Path */ path: string; /** Description */ @@ -6782,20 +7073,20 @@ export type components = { * Model Format * @enum {string} */ - model_format: "onnx"; - error?: components["schemas"]["ModelError"]; - variant: components["schemas"]["ModelVariantType"]; + model_format: 'onnx'; + error?: components['schemas']['ModelError']; + variant: components['schemas']['ModelVariantType']; }; /** ONNXStableDiffusion2ModelConfig */ ONNXStableDiffusion2ModelConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - 
model_type: "onnx"; + model_type: 'onnx'; /** Path */ path: string; /** Description */ @@ -6804,10 +7095,10 @@ export type components = { * Model Format * @enum {string} */ - model_format: "onnx"; - error?: components["schemas"]["ModelError"]; - variant: components["schemas"]["ModelVariantType"]; - prediction_type: components["schemas"]["SchedulerPredictionType"]; + model_format: 'onnx'; + error?: components['schemas']['ModelError']; + variant: components['schemas']['ModelVariantType']; + prediction_type: components['schemas']['SchedulerPredictionType']; /** Upcast Attention */ upcast_attention: boolean; }; @@ -6842,17 +7133,17 @@ export type components = { * Positive Conditioning * @description Positive conditioning tensor */ - positive_conditioning?: components["schemas"]["ConditioningField"]; + positive_conditioning?: components['schemas']['ConditioningField']; /** * Negative Conditioning * @description Negative conditioning tensor */ - negative_conditioning?: components["schemas"]["ConditioningField"]; + negative_conditioning?: components['schemas']['ConditioningField']; /** * Noise * @description Noise tensor */ - noise?: components["schemas"]["LatentsField"]; + noise?: components['schemas']['LatentsField']; /** * Steps * @description Number of steps to run @@ -6871,82 +7162,120 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; + scheduler?: + | 'ddim' + | 'ddpm' + | 'deis' + | 'lms' + | 'lms_k' + | 'pndm' + | 'heun' + | 'heun_k' + | 'euler' + | 'euler_k' + | 'euler_a' + | 'kdpm_2' + | 'kdpm_2_a' + | 'dpmpp_2s' + | 'dpmpp_2s_k' + | 'dpmpp_2m' + | 'dpmpp_2m_k' + | 'dpmpp_2m_sde' + | 'dpmpp_2m_sde_k' + | 'dpmpp_sde' + | 'dpmpp_sde_k' + | 'unipc'; /** * Precision * 
@description Precision to use * @default tensor(float16) * @enum {string} */ - precision?: "tensor(bool)" | "tensor(int8)" | "tensor(uint8)" | "tensor(int16)" | "tensor(uint16)" | "tensor(int32)" | "tensor(uint32)" | "tensor(int64)" | "tensor(uint64)" | "tensor(float16)" | "tensor(float)" | "tensor(double)"; + precision?: + | 'tensor(bool)' + | 'tensor(int8)' + | 'tensor(uint8)' + | 'tensor(int16)' + | 'tensor(uint16)' + | 'tensor(int32)' + | 'tensor(uint32)' + | 'tensor(int64)' + | 'tensor(uint64)' + | 'tensor(float16)' + | 'tensor(float)' + | 'tensor(double)'; /** * Unet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * Control * @description ControlNet(s) to apply */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; + control?: + | components['schemas']['ControlField'] + | components['schemas']['ControlField'][]; /** * Type * @default t2l_onnx * @enum {string} */ - type: "t2l_onnx"; + type: 't2l_onnx'; }; /** * OffsetPaginatedResults[BoardDTO] * @description Offset-paginated results + * Generic must be a Pydantic model */ OffsetPaginatedResults_BoardDTO_: { - /** - * Items - * @description Items - */ - items: components["schemas"]["BoardDTO"][]; - /** - * Offset - * @description Offset from which to retrieve items - */ - offset: number; /** * Limit * @description Limit of items to get */ limit: number; + /** + * Offset + * @description Offset from which to retrieve items + */ + offset: number; /** * Total * @description Total number of items in result */ total: number; + /** + * Items + * @description Items + */ + items: components['schemas']['BoardDTO'][]; }; /** * OffsetPaginatedResults[ImageDTO] * @description Offset-paginated results + * Generic must be a Pydantic model */ OffsetPaginatedResults_ImageDTO_: { - /** - * Items - * @description Items - */ - items: components["schemas"]["ImageDTO"][]; - /** - * Offset - * @description 
Offset from which to retrieve items - */ - offset: number; /** * Limit * @description Limit of items to get */ limit: number; + /** + * Offset + * @description Offset from which to retrieve items + */ + offset: number; /** * Total * @description Total number of items in result */ total: number; + /** + * Items + * @description Items + */ + items: components['schemas']['ImageDTO'][]; }; /** * OnnxModelField @@ -6959,9 +7288,9 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description Model Type */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; }; /** * ONNX Main Model @@ -6994,13 +7323,13 @@ export type components = { * Model * @description ONNX Main model (UNet, VAE, CLIP) to load */ - model: components["schemas"]["OnnxModelField"]; + model: components['schemas']['OnnxModelField']; /** * Type * @default onnx_model_loader * @enum {string} */ - type: "onnx_model_loader"; + type: 'onnx_model_loader'; }; /** * Openpose Processor @@ -7033,13 +7362,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default openpose_image_processor * @enum {string} */ - type: "openpose_image_processor"; + type: 'openpose_image_processor'; /** * Hand And Face * @description Whether to use hands and face mode @@ -7062,13 +7391,9 @@ export type components = { /** * PaginatedResults[GraphExecutionState] * @description Paginated results + * Generic must be a Pydantic model */ PaginatedResults_GraphExecutionState_: { - /** - * Items - * @description Items - */ - items: components["schemas"]["GraphExecutionState"][]; /** * Page * @description Current Page @@ -7089,6 +7414,11 @@ export type components = { * @description Total number of items in result */ total: 
number; + /** + * Items + * @description Items + */ + items: components['schemas']['GraphExecutionState'][]; }; /** * PIDI Processor @@ -7121,13 +7451,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default pidi_image_processor * @enum {string} */ - type: "pidi_image_processor"; + type: 'pidi_image_processor'; /** * Detect Resolution * @description Pixel resolution for detection @@ -7212,7 +7542,7 @@ export type components = { * @default prompt_from_file * @enum {string} */ - type: "prompt_from_file"; + type: 'prompt_from_file'; }; /** * PruneResult @@ -7275,7 +7605,7 @@ export type components = { * @default rand_float * @enum {string} */ - type: "rand_float"; + type: 'rand_float'; }; /** * Random Integer @@ -7321,7 +7651,7 @@ export type components = { * @default rand_int * @enum {string} */ - type: "rand_int"; + type: 'rand_int'; }; /** * Random Range @@ -7378,7 +7708,7 @@ export type components = { * @default random_range * @enum {string} */ - type: "random_range"; + type: 'random_range'; }; /** * Integer Range @@ -7430,7 +7760,7 @@ export type components = { * @default range * @enum {string} */ - type: "range"; + type: 'range'; }; /** * Integer Range of Size @@ -7482,7 +7812,7 @@ export type components = { * @default range_of_size * @enum {string} */ - type: "range_of_size"; + type: 'range_of_size'; }; /** RemoveImagesFromBoardResult */ RemoveImagesFromBoardResult: { @@ -7523,7 +7853,7 @@ export type components = { * Latents * @description Latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Width * @description Width of output (px) @@ -7540,7 +7870,14 @@ export type components = { * @default bilinear * @enum {string} */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + mode?: + | 'nearest' + | 'linear' 
+ | 'bilinear' + | 'bicubic' + | 'trilinear' + | 'area' + | 'nearest-exact'; /** * Antialias * @description Whether or not to apply antialiasing (bilinear or bicubic only) @@ -7552,7 +7889,7 @@ export type components = { * @default lresize * @enum {string} */ - type: "lresize"; + type: 'lresize'; }; /** * ResourceOrigin @@ -7563,7 +7900,7 @@ export type components = { * This may be a user-initiated upload, or an internal application upload (eg Canvas init image). * @enum {string} */ - ResourceOrigin: "internal" | "external"; + ResourceOrigin: 'internal' | 'external'; /** * Round Float * @description Rounds a float to a specified number of decimal places. @@ -7608,7 +7945,7 @@ export type components = { * @default round_float * @enum {string} */ - type: "round_float"; + type: 'round_float'; }; /** * SDXL Prompt @@ -7683,18 +8020,18 @@ export type components = { * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components['schemas']['ClipField']; /** * Type * @default sdxl_compel_prompt * @enum {string} */ - type: "sdxl_compel_prompt"; + type: 'sdxl_compel_prompt'; }; /** * SDXL LoRA @@ -7727,7 +8064,7 @@ export type components = { * LoRA * @description LoRA model to load */ - lora: components["schemas"]["LoRAModelField"]; + lora: components['schemas']['LoRAModelField']; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -7738,23 +8075,23 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: 
components['schemas']['ClipField']; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components['schemas']['ClipField']; /** * Type * @default sdxl_lora_loader * @enum {string} */ - type: "sdxl_lora_loader"; + type: 'sdxl_lora_loader'; }; /** * SDXLLoraLoaderOutput @@ -7765,23 +8102,23 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components['schemas']['ClipField']; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components['schemas']['ClipField']; /** * Type * @default sdxl_lora_loader_output * @enum {string} */ - type: "sdxl_lora_loader_output"; + type: 'sdxl_lora_loader_output'; }; /** * SDXL Main Model @@ -7814,13 +8151,13 @@ export type components = { * Model * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ - model: components["schemas"]["MainModelField"]; + model: components['schemas']['MainModelField']; /** * Type * @default sdxl_model_loader * @enum {string} */ - type: "sdxl_model_loader"; + type: 'sdxl_model_loader'; }; /** * SDXLModelLoaderOutput @@ -7831,28 +8168,28 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet: components["schemas"]["UNetField"]; + unet: components['schemas']['UNetField']; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip: components["schemas"]["ClipField"]; + clip: components['schemas']['ClipField']; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2: components["schemas"]["ClipField"]; + clip2: 
components['schemas']['ClipField']; /** * VAE * @description VAE */ - vae: components["schemas"]["VaeField"]; + vae: components['schemas']['VaeField']; /** * Type * @default sdxl_model_loader_output * @enum {string} */ - type: "sdxl_model_loader_output"; + type: 'sdxl_model_loader_output'; }; /** * SDXL Refiner Prompt @@ -7917,13 +8254,13 @@ export type components = { * Clip2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components['schemas']['ClipField']; /** * Type * @default sdxl_refiner_compel_prompt * @enum {string} */ - type: "sdxl_refiner_compel_prompt"; + type: 'sdxl_refiner_compel_prompt'; }; /** * SDXL Refiner Model @@ -7956,13 +8293,13 @@ export type components = { * Model * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ - model: components["schemas"]["MainModelField"]; + model: components['schemas']['MainModelField']; /** * Type * @default sdxl_refiner_model_loader * @enum {string} */ - type: "sdxl_refiner_model_loader"; + type: 'sdxl_refiner_model_loader'; }; /** * SDXLRefinerModelLoaderOutput @@ -7973,23 +8310,23 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet: components["schemas"]["UNetField"]; + unet: components['schemas']['UNetField']; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2: components["schemas"]["ClipField"]; + clip2: components['schemas']['ClipField']; /** * VAE * @description VAE */ - vae: components["schemas"]["VaeField"]; + vae: components['schemas']['VaeField']; /** * Type * @default sdxl_refiner_model_loader_output * @enum {string} */ - type: "sdxl_refiner_model_loader_output"; + type: 'sdxl_refiner_model_loader_output'; }; /** * Save Image @@ -8022,23 +8359,23 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * 
Board * @description The board to save the image to */ - board?: components["schemas"]["BoardField"]; + board?: components['schemas']['BoardField']; /** * Metadata * @description Optional core metadata to be written to image */ - metadata?: components["schemas"]["CoreMetadata"]; + metadata?: components['schemas']['CoreMetadata']; /** * Type * @default save_image * @enum {string} */ - type: "save_image"; + type: 'save_image'; }; /** * Scale Latents @@ -8071,7 +8408,7 @@ export type components = { * Latents * @description Latents tensor */ - latents?: components["schemas"]["LatentsField"]; + latents?: components['schemas']['LatentsField']; /** * Scale Factor * @description The factor by which to scale @@ -8083,7 +8420,14 @@ export type components = { * @default bilinear * @enum {string} */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + mode?: + | 'nearest' + | 'linear' + | 'bilinear' + | 'bicubic' + | 'trilinear' + | 'area' + | 'nearest-exact'; /** * Antialias * @description Whether or not to apply antialiasing (bilinear or bicubic only) @@ -8095,7 +8439,7 @@ export type components = { * @default lscale * @enum {string} */ - type: "lscale"; + type: 'lscale'; }; /** * Scheduler @@ -8130,13 +8474,35 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; + scheduler?: + | 'ddim' + | 'ddpm' + | 'deis' + | 'lms' + | 'lms_k' + | 'pndm' + | 'heun' + | 'heun_k' + | 'euler' + | 'euler_k' + | 'euler_a' + | 'kdpm_2' + | 'kdpm_2_a' + | 'dpmpp_2s' + | 'dpmpp_2s_k' + | 'dpmpp_2m' + | 'dpmpp_2m_k' + | 'dpmpp_2m_sde' + | 'dpmpp_2m_sde_k' + | 'dpmpp_sde' + | 'dpmpp_sde_k' + | 'unipc'; /** * Type * @default scheduler * @enum {string} */ - type: 
"scheduler"; + type: 'scheduler'; }; /** * SchedulerOutput @@ -8150,20 +8516,42 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; + scheduler: + | 'ddim' + | 'ddpm' + | 'deis' + | 'lms' + | 'lms_k' + | 'pndm' + | 'heun' + | 'heun_k' + | 'euler' + | 'euler_k' + | 'euler_a' + | 'kdpm_2' + | 'kdpm_2_a' + | 'dpmpp_2s' + | 'dpmpp_2s_k' + | 'dpmpp_2m' + | 'dpmpp_2m_k' + | 'dpmpp_2m_sde' + | 'dpmpp_2m_sde_k' + | 'dpmpp_sde' + | 'dpmpp_sde_k' + | 'unipc'; /** * Type * @default scheduler_output * @enum {string} */ - type: "scheduler_output"; + type: 'scheduler_output'; }; /** * SchedulerPredictionType * @description An enumeration. * @enum {string} */ - SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; + SchedulerPredictionType: 'epsilon' | 'v_prediction' | 'sample'; /** * Seamless * @description Applies the seamless transformation to the Model UNet and VAE. 
@@ -8195,12 +8583,12 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * VAE * @description VAE model to load */ - vae?: components["schemas"]["VaeField"]; + vae?: components['schemas']['VaeField']; /** * Seamless Y * @description Specify whether Y axis is seamless @@ -8218,7 +8606,7 @@ export type components = { * @default seamless * @enum {string} */ - type: "seamless"; + type: 'seamless'; }; /** * SeamlessModeOutput @@ -8229,18 +8617,18 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components['schemas']['UNetField']; /** * VAE * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components['schemas']['VaeField']; /** * Type * @default seamless_output * @enum {string} */ - type: "seamless_output"; + type: 'seamless_output'; }; /** * Segment Anything Processor @@ -8273,13 +8661,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default segment_anything_processor * @enum {string} */ - type: "segment_anything_processor"; + type: 'segment_anything_processor'; }; /** SessionProcessorStatus */ SessionProcessorStatus: { @@ -8299,8 +8687,8 @@ export type components = { * @description The overall status of session queue and processor */ SessionQueueAndProcessorStatus: { - queue: components["schemas"]["SessionQueueStatus"]; - processor: components["schemas"]["SessionProcessorStatus"]; + queue: components['schemas']['SessionQueueStatus']; + processor: components['schemas']['SessionProcessorStatus']; }; /** * SessionQueueItem @@ -8318,7 +8706,7 @@ export type components = { * @default pending * @enum {string} */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status: 'pending' | 
'in_progress' | 'completed' | 'failed' | 'canceled'; /** * Priority * @description The priority of this queue item @@ -8369,12 +8757,12 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; + field_values?: components['schemas']['NodeFieldValue'][]; /** * Session * @description The fully-populated session to be executed */ - session: components["schemas"]["GraphExecutionState"]; + session: components['schemas']['GraphExecutionState']; }; /** * SessionQueueItemDTO @@ -8392,7 +8780,7 @@ export type components = { * @default pending * @enum {string} */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status: 'pending' | 'in_progress' | 'completed' | 'failed' | 'canceled'; /** * Priority * @description The priority of this queue item @@ -8443,7 +8831,7 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; + field_values?: components['schemas']['NodeFieldValue'][]; }; /** SessionQueueStatus */ SessionQueueStatus: { @@ -8529,24 +8917,24 @@ export type components = { * Image * @description The image to show */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default show_image * @enum {string} */ - type: "show_image"; + type: 'show_image'; }; /** StableDiffusion1ModelCheckpointConfig */ StableDiffusion1ModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8555,24 +8943,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: "checkpoint"; - error?: 
components["schemas"]["ModelError"]; + model_format: 'checkpoint'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; /** Config */ config: string; - variant: components["schemas"]["ModelVariantType"]; + variant: components['schemas']['ModelVariantType']; }; /** StableDiffusion1ModelDiffusersConfig */ StableDiffusion1ModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8581,22 +8969,22 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; - variant: components["schemas"]["ModelVariantType"]; + variant: components['schemas']['ModelVariantType']; }; /** StableDiffusion2ModelCheckpointConfig */ StableDiffusion2ModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8605,24 +8993,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + model_format: 'checkpoint'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; /** Config */ config: string; - variant: components["schemas"]["ModelVariantType"]; + variant: components['schemas']['ModelVariantType']; }; /** StableDiffusion2ModelDiffusersConfig */ StableDiffusion2ModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: 
components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8631,22 +9019,22 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; - variant: components["schemas"]["ModelVariantType"]; + variant: components['schemas']['ModelVariantType']; }; /** StableDiffusionXLModelCheckpointConfig */ StableDiffusionXLModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8655,24 +9043,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + model_format: 'checkpoint'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; /** Config */ config: string; - variant: components["schemas"]["ModelVariantType"]; + variant: components['schemas']['ModelVariantType']; }; /** StableDiffusionXLModelDiffusersConfig */ StableDiffusionXLModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "main"; + model_type: 'main'; /** Path */ path: string; /** Description */ @@ -8681,11 +9069,11 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; /** Vae */ vae?: string; - variant: components["schemas"]["ModelVariantType"]; + variant: 
components['schemas']['ModelVariantType']; }; /** * Step Param Easing @@ -8720,7 +9108,38 @@ export type components = { * @default Linear * @enum {string} */ - easing?: "Linear" | "QuadIn" | "QuadOut" | "QuadInOut" | "CubicIn" | "CubicOut" | "CubicInOut" | "QuarticIn" | "QuarticOut" | "QuarticInOut" | "QuinticIn" | "QuinticOut" | "QuinticInOut" | "SineIn" | "SineOut" | "SineInOut" | "CircularIn" | "CircularOut" | "CircularInOut" | "ExponentialIn" | "ExponentialOut" | "ExponentialInOut" | "ElasticIn" | "ElasticOut" | "ElasticInOut" | "BackIn" | "BackOut" | "BackInOut" | "BounceIn" | "BounceOut" | "BounceInOut"; + easing?: + | 'Linear' + | 'QuadIn' + | 'QuadOut' + | 'QuadInOut' + | 'CubicIn' + | 'CubicOut' + | 'CubicInOut' + | 'QuarticIn' + | 'QuarticOut' + | 'QuarticInOut' + | 'QuinticIn' + | 'QuinticOut' + | 'QuinticInOut' + | 'SineIn' + | 'SineOut' + | 'SineInOut' + | 'CircularIn' + | 'CircularOut' + | 'CircularInOut' + | 'ExponentialIn' + | 'ExponentialOut' + | 'ExponentialInOut' + | 'ElasticIn' + | 'ElasticOut' + | 'ElasticInOut' + | 'BackIn' + | 'BackOut' + | 'BackInOut' + | 'BounceIn' + | 'BounceOut' + | 'BounceInOut'; /** * Num Steps * @description number of denoising steps @@ -8778,7 +9197,7 @@ export type components = { * @default step_param_easing * @enum {string} */ - type: "step_param_easing"; + type: 'step_param_easing'; }; /** * String2Output @@ -8800,7 +9219,7 @@ export type components = { * @default string_2_output * @enum {string} */ - type: "string_2_output"; + type: 'string_2_output'; }; /** * String Collection Primitive @@ -8839,7 +9258,7 @@ export type components = { * @default string_collection * @enum {string} */ - type: "string_collection"; + type: 'string_collection'; }; /** * StringCollectionOutput @@ -8856,7 +9275,7 @@ export type components = { * @default string_collection_output * @enum {string} */ - type: "string_collection_output"; + type: 'string_collection_output'; }; /** * String Primitive @@ -8896,7 +9315,7 @@ export type 
components = { * @default string * @enum {string} */ - type: "string"; + type: 'string'; }; /** * String Join @@ -8942,7 +9361,7 @@ export type components = { * @default string_join * @enum {string} */ - type: "string_join"; + type: 'string_join'; }; /** * String Join Three @@ -8994,7 +9413,7 @@ export type components = { * @default string_join_three * @enum {string} */ - type: "string_join_three"; + type: 'string_join_three'; }; /** * StringOutput @@ -9011,7 +9430,7 @@ export type components = { * @default string_output * @enum {string} */ - type: "string_output"; + type: 'string_output'; }; /** * StringPosNegOutput @@ -9033,7 +9452,7 @@ export type components = { * @default string_pos_neg_output * @enum {string} */ - type: "string_pos_neg_output"; + type: 'string_pos_neg_output'; }; /** * String Replace @@ -9091,7 +9510,7 @@ export type components = { * @default string_replace * @enum {string} */ - type: "string_replace"; + type: 'string_replace'; }; /** * String Split @@ -9137,7 +9556,7 @@ export type components = { * @default string_split * @enum {string} */ - type: "string_split"; + type: 'string_split'; }; /** * String Split Negative @@ -9177,14 +9596,24 @@ export type components = { * @default string_split_neg * @enum {string} */ - type: "string_split_neg"; + type: 'string_split_neg'; }; /** * SubModelType * @description An enumeration. 
* @enum {string} */ - SubModelType: "unet" | "text_encoder" | "text_encoder_2" | "tokenizer" | "tokenizer_2" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; + SubModelType: + | 'unet' + | 'text_encoder' + | 'text_encoder_2' + | 'tokenizer' + | 'tokenizer_2' + | 'vae' + | 'vae_decoder' + | 'vae_encoder' + | 'scheduler' + | 'safety_checker'; /** * Subtract Integers * @description Subtracts two numbers @@ -9229,7 +9658,7 @@ export type components = { * @default sub * @enum {string} */ - type: "sub"; + type: 'sub'; }; /** T2IAdapterField */ T2IAdapterField: { @@ -9237,12 +9666,12 @@ export type components = { * Image * @description The T2I-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; + image: components['schemas']['ImageField']; /** * T2I Adapter Model * @description The T2I-Adapter model to use. */ - t2i_adapter_model: components["schemas"]["T2IAdapterModelField"]; + t2i_adapter_model: components['schemas']['T2IAdapterModelField']; /** * Weight * @description The weight given to the T2I-Adapter @@ -9267,7 +9696,11 @@ export type components = { * @default just_resize * @enum {string} */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + resize_mode?: + | 'just_resize' + | 'crop_resize' + | 'fill_resize' + | 'just_resize_simple'; }; /** * T2I-Adapter @@ -9300,12 +9733,12 @@ export type components = { * Image * @description The IP-Adapter image prompt. */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * T2I-Adapter Model * @description The T2I-Adapter model. 
*/ - t2i_adapter_model: components["schemas"]["T2IAdapterModelField"]; + ip_adapter_model: components['schemas']['T2IAdapterModelField']; /** * Weight * @description The weight given to the T2I-Adapter @@ -9330,24 +9763,28 @@ export type components = { * @default just_resize * @enum {string} */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + resize_mode?: + | 'just_resize' + | 'crop_resize' + | 'fill_resize' + | 'just_resize_simple'; /** * Type * @default t2i_adapter * @enum {string} */ - type: "t2i_adapter"; + type: 't2i_adapter'; }; /** T2IAdapterModelDiffusersConfig */ T2IAdapterModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "t2i_adapter"; + model_type: 't2i_adapter'; /** Path */ path: string; /** Description */ @@ -9356,8 +9793,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + model_format: 'diffusers'; + error?: components['schemas']['ModelError']; }; /** T2IAdapterModelField */ T2IAdapterModelField: { @@ -9367,7 +9804,7 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** * T2IAdapterOutput @@ -9380,31 +9817,31 @@ export type components = { * T2I Adapter * @description T2I-Adapter(s) to apply */ - t2i_adapter: components["schemas"]["T2IAdapterField"]; + t2i_adapter: components['schemas']['T2IAdapterField']; /** * Type * @default t2i_adapter_output * @enum {string} */ - type: "t2i_adapter_output"; + type: 't2i_adapter_output'; }; /** TextualInversionModelConfig */ TextualInversionModelConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: 
components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "embedding"; + model_type: 'embedding'; /** Path */ path: string; /** Description */ description?: string; /** Model Format */ model_format: null; - error?: components["schemas"]["ModelError"]; + error?: components['schemas']['ModelError']; }; /** * Tile Resample Processor @@ -9437,13 +9874,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default tile_image_processor * @enum {string} */ - type: "tile_image_processor"; + type: 'tile_image_processor'; /** * Down Sampling Rate * @description Down sampling rate @@ -9457,17 +9894,17 @@ export type components = { * Unet * @description Info to load unet submodel */ - unet: components["schemas"]["ModelInfo"]; + unet: components['schemas']['ModelInfo']; /** * Scheduler * @description Info to load scheduler submodel */ - scheduler: components["schemas"]["ModelInfo"]; + scheduler: components['schemas']['ModelInfo']; /** * Loras * @description Loras to apply on model loading */ - loras: components["schemas"]["LoraInfo"][]; + loras: components['schemas']['LoraInfo'][]; /** * Seamless Axes * @description Axes("x" and "y") to which apply seamless @@ -9498,7 +9935,7 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; /** VaeField */ VaeField: { @@ -9506,7 +9943,7 @@ export type components = { * Vae * @description Info to load vae submodel */ - vae: components["schemas"]["ModelInfo"]; + vae: components['schemas']['ModelInfo']; /** * Seamless Axes * @description Axes("x" and "y") to which apply seamless @@ -9544,13 +9981,13 @@ export type components = { * VAE * @description VAE model to load */ - vae_model: components["schemas"]["VAEModelField"]; + vae_model: 
components['schemas']['VAEModelField']; /** * Type * @default vae_loader * @enum {string} */ - type: "vae_loader"; + type: 'vae_loader'; }; /** * VaeLoaderOutput @@ -9561,37 +9998,37 @@ export type components = { * VAE * @description VAE */ - vae: components["schemas"]["VaeField"]; + vae: components['schemas']['VaeField']; /** * Type * @default vae_loader_output * @enum {string} */ - type: "vae_loader_output"; + type: 'vae_loader_output'; }; /** VaeModelConfig */ VaeModelConfig: { /** Model Name */ model_name: string; - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** * Model Type * @enum {string} */ - model_type: "vae"; + model_type: 'vae'; /** Path */ path: string; /** Description */ description?: string; - model_format: components["schemas"]["VaeModelFormat"]; - error?: components["schemas"]["ModelError"]; + model_format: components['schemas']['VaeModelFormat']; + error?: components['schemas']['ModelError']; }; /** * VaeModelFormat * @description An enumeration. * @enum {string} */ - VaeModelFormat: "checkpoint" | "diffusers"; + VaeModelFormat: 'checkpoint' | 'diffusers'; /** ValidationError */ ValidationError: { /** Location */ @@ -9632,13 +10069,13 @@ export type components = { * Image * @description The image to process */ - image?: components["schemas"]["ImageField"]; + image?: components['schemas']['ImageField']; /** * Type * @default zoe_depth_image_processor * @enum {string} */ - type: "zoe_depth_image_processor"; + type: 'zoe_depth_image_processor'; }; /** * UIConfigBase @@ -9675,20 +10112,66 @@ export type components = { * - `Input.Any`: The field may have its value provided either directly or by a connection. * @enum {string} */ - Input: "connection" | "direct" | "any"; + Input: 'connection' | 'direct' | 'any'; /** * UIType * @description Type hints for the UI. 
* If a field should be provided a data type that does not exactly match the python type of the field, use this to provide the type that should be used instead. See the node development docs for detail on adding a new field type, which involves client-side changes. * @enum {string} */ - UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "MetadataField" | "BoardField"; + UIType: + | 'boolean' + | 'ColorField' + | 'ConditioningField' + | 'ControlField' + | 'float' + | 'ImageField' + | 'integer' + | 'LatentsField' + | 'string' + | 'BooleanCollection' + | 'ColorCollection' + | 'ConditioningCollection' + | 'ControlCollection' + | 'FloatCollection' + | 'ImageCollection' + | 'IntegerCollection' + | 'LatentsCollection' + | 'StringCollection' + | 'BooleanPolymorphic' + | 'ColorPolymorphic' + | 'ConditioningPolymorphic' + | 'ControlPolymorphic' + | 'FloatPolymorphic' + | 'ImagePolymorphic' + | 'IntegerPolymorphic' + | 'LatentsPolymorphic' + | 'StringPolymorphic' + | 'MainModelField' + | 'SDXLMainModelField' + | 'SDXLRefinerModelField' + | 'ONNXModelField' + | 'VaeModelField' + | 'LoRAModelField' + | 'ControlNetModelField' + | 'IPAdapterModelField' + | 'UNetField' + | 'VaeField' + | 'ClipField' 
+ | 'Collection' + | 'CollectionItem' + | 'enum' + | 'Scheduler' + | 'WorkflowField' + | 'IsIntermediate' + | 'MetadataField' + | 'BoardField'; /** * UIComponent * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. * @enum {string} */ - UIComponent: "none" | "textarea" | "slider"; + UIComponent: 'none' | 'textarea' | 'slider'; /** * _InputField * @description *DO NOT USE* @@ -9697,11 +10180,11 @@ export type components = { * purpose in the backend. */ _InputField: { - input: components["schemas"]["Input"]; + input: components['schemas']['Input']; /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; - ui_component?: components["schemas"]["UIComponent"]; + ui_type?: components['schemas']['UIType']; + ui_component?: components['schemas']['UIComponent']; /** Ui Order */ ui_order?: number; /** Ui Choice Labels */ @@ -9721,7 +10204,7 @@ export type components = { _OutputField: { /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; + ui_type?: components['schemas']['UIType']; /** Ui Order */ ui_order?: number; }; @@ -9730,49 +10213,61 @@ export type components = { * @description An enumeration. * @enum {string} */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * T2IAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - T2IAdapterModelFormat: "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. 
- * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; + T2IAdapterModelFormat: 'diffusers'; /** * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + ControlNetModelFormat: 'checkpoint' | 'diffusers'; /** - * CLIPVisionModelFormat + * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ - CLIPVisionModelFormat: "diffusers"; + StableDiffusion2ModelFormat: 'checkpoint' | 'diffusers'; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + T2IAdapterModelFormat: 'diffusers'; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionXLModelFormat: 'checkpoint' | 'diffusers'; + /** + * IPAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + IPAdapterModelFormat: 'invokeai'; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: 'checkpoint' | 'diffusers'; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: 'olive' | 'onnx'; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: 'checkpoint' | 'diffusers'; + /** + * ControlNetModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + ControlNetModelFormat: 'checkpoint' | 'diffusers'; }; responses: never; parameters: never; @@ -9786,7 +10281,6 @@ export type $defs = Record; export type external = Record; export type operations = { - /** * List Sessions * @deprecated @@ -9807,13 +10301,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["PaginatedResults_GraphExecutionState_"]; + 'application/json': components['schemas']['PaginatedResults_GraphExecutionState_']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -9832,14 +10326,14 @@ export type operations = { }; requestBody?: { content: { - "application/json": components["schemas"]["Graph"]; + 'application/json': components['schemas']['Graph']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Invalid json */ @@ -9849,7 +10343,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -9870,7 +10364,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Session not found */ @@ -9880,7 +10374,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -9899,14 
+10393,126 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | 
components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | 
components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | 
components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + 'application/json': + | components['schemas']['BooleanInvocation'] + | components['schemas']['BooleanCollectionInvocation'] + | components['schemas']['IntegerInvocation'] + | components['schemas']['IntegerCollectionInvocation'] + | components['schemas']['FloatInvocation'] + | components['schemas']['FloatCollectionInvocation'] + | components['schemas']['StringInvocation'] + | components['schemas']['StringCollectionInvocation'] + | components['schemas']['ImageInvocation'] + | components['schemas']['ImageCollectionInvocation'] + | components['schemas']['LatentsInvocation'] + | components['schemas']['LatentsCollectionInvocation'] + | components['schemas']['ColorInvocation'] + | components['schemas']['ConditioningInvocation'] + | components['schemas']['ConditioningCollectionInvocation'] + | components['schemas']['ControlNetInvocation'] + | components['schemas']['ImageProcessorInvocation'] + | components['schemas']['MainModelLoaderInvocation'] + | components['schemas']['LoraLoaderInvocation'] + | components['schemas']['SDXLLoraLoaderInvocation'] + | components['schemas']['VaeLoaderInvocation'] + | components['schemas']['SeamlessModeInvocation'] + | components['schemas']['SDXLModelLoaderInvocation'] + | components['schemas']['SDXLRefinerModelLoaderInvocation'] + | components['schemas']['MetadataAccumulatorInvocation'] + | components['schemas']['IPAdapterInvocation'] + | components['schemas']['CompelInvocation'] + | components['schemas']['SDXLCompelPromptInvocation'] + | components['schemas']['SDXLRefinerCompelPromptInvocation'] + | components['schemas']['ClipSkipInvocation'] + | components['schemas']['SchedulerInvocation'] + | components['schemas']['CreateDenoiseMaskInvocation'] + | components['schemas']['DenoiseLatentsInvocation'] + | components['schemas']['LatentsToImageInvocation'] + | components['schemas']['ResizeLatentsInvocation'] + | 
components['schemas']['ScaleLatentsInvocation'] + | components['schemas']['ImageToLatentsInvocation'] + | components['schemas']['BlendLatentsInvocation'] + | components['schemas']['ONNXPromptInvocation'] + | components['schemas']['ONNXTextToLatentsInvocation'] + | components['schemas']['ONNXLatentsToImageInvocation'] + | components['schemas']['OnnxModelLoaderInvocation'] + | components['schemas']['ShowImageInvocation'] + | components['schemas']['BlankImageInvocation'] + | components['schemas']['ImageCropInvocation'] + | components['schemas']['ImagePasteInvocation'] + | components['schemas']['MaskFromAlphaInvocation'] + | components['schemas']['ImageMultiplyInvocation'] + | components['schemas']['ImageChannelInvocation'] + | components['schemas']['ImageConvertInvocation'] + | components['schemas']['ImageBlurInvocation'] + | components['schemas']['ImageResizeInvocation'] + | components['schemas']['ImageScaleInvocation'] + | components['schemas']['ImageLerpInvocation'] + | components['schemas']['ImageInverseLerpInvocation'] + | components['schemas']['ImageNSFWBlurInvocation'] + | components['schemas']['ImageWatermarkInvocation'] + | components['schemas']['MaskEdgeInvocation'] + | components['schemas']['MaskCombineInvocation'] + | components['schemas']['ColorCorrectInvocation'] + | components['schemas']['ImageHueAdjustmentInvocation'] + | components['schemas']['ImageChannelOffsetInvocation'] + | components['schemas']['ImageChannelMultiplyInvocation'] + | components['schemas']['SaveImageInvocation'] + | components['schemas']['DynamicPromptInvocation'] + | components['schemas']['PromptsFromFileInvocation'] + | components['schemas']['CvInpaintInvocation'] + | components['schemas']['FloatLinearRangeInvocation'] + | components['schemas']['StepParamEasingInvocation'] + | components['schemas']['AddInvocation'] + | components['schemas']['SubtractInvocation'] + | components['schemas']['MultiplyInvocation'] + | components['schemas']['DivideInvocation'] + | 
components['schemas']['RandomIntInvocation'] + | components['schemas']['FloatToIntegerInvocation'] + | components['schemas']['RoundInvocation'] + | components['schemas']['IntegerMathInvocation'] + | components['schemas']['FloatMathInvocation'] + | components['schemas']['NoiseInvocation'] + | components['schemas']['RangeInvocation'] + | components['schemas']['RangeOfSizeInvocation'] + | components['schemas']['RandomRangeInvocation'] + | components['schemas']['ESRGANInvocation'] + | components['schemas']['StringSplitNegInvocation'] + | components['schemas']['StringSplitInvocation'] + | components['schemas']['StringJoinInvocation'] + | components['schemas']['StringJoinThreeInvocation'] + | components['schemas']['StringReplaceInvocation'] + | components['schemas']['InfillColorInvocation'] + | components['schemas']['InfillTileInvocation'] + | components['schemas']['InfillPatchMatchInvocation'] + | components['schemas']['LaMaInfillInvocation'] + | components['schemas']['CV2InfillInvocation'] + | components['schemas']['GraphInvocation'] + | components['schemas']['IterateInvocation'] + | components['schemas']['CollectInvocation'] + | components['schemas']['CannyImageProcessorInvocation'] + | components['schemas']['HedImageProcessorInvocation'] + | components['schemas']['LineartImageProcessorInvocation'] + | components['schemas']['LineartAnimeImageProcessorInvocation'] + | components['schemas']['OpenposeImageProcessorInvocation'] + | components['schemas']['MidasDepthImageProcessorInvocation'] + | components['schemas']['NormalbaeImageProcessorInvocation'] + | components['schemas']['MlsdImageProcessorInvocation'] + | components['schemas']['PidiImageProcessorInvocation'] + | components['schemas']['ContentShuffleImageProcessorInvocation'] + | components['schemas']['ZoeDepthImageProcessorInvocation'] + | components['schemas']['MediapipeFaceProcessorInvocation'] + | components['schemas']['LeresImageProcessorInvocation'] + | 
components['schemas']['TileResamplerProcessorInvocation'] + | components['schemas']['SegmentAnythingProcessorInvocation'] + | components['schemas']['ColorMapImageProcessorInvocation']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": string; + 'application/json': string; }; }; /** @description Invalid node or link */ @@ -9920,7 +10526,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -9941,14 +10547,126 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | 
components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | 
components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | 
components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + 'application/json': + | components['schemas']['BooleanInvocation'] + | components['schemas']['BooleanCollectionInvocation'] + | components['schemas']['IntegerInvocation'] + | components['schemas']['IntegerCollectionInvocation'] + | components['schemas']['FloatInvocation'] + | components['schemas']['FloatCollectionInvocation'] + | components['schemas']['StringInvocation'] + | components['schemas']['StringCollectionInvocation'] + | components['schemas']['ImageInvocation'] + | components['schemas']['ImageCollectionInvocation'] + | components['schemas']['LatentsInvocation'] + | components['schemas']['LatentsCollectionInvocation'] + | components['schemas']['ColorInvocation'] + | components['schemas']['ConditioningInvocation'] + | components['schemas']['ConditioningCollectionInvocation'] + | components['schemas']['ControlNetInvocation'] + | components['schemas']['ImageProcessorInvocation'] + | components['schemas']['MainModelLoaderInvocation'] + | components['schemas']['LoraLoaderInvocation'] + | components['schemas']['SDXLLoraLoaderInvocation'] + | components['schemas']['VaeLoaderInvocation'] + | components['schemas']['SeamlessModeInvocation'] + | components['schemas']['SDXLModelLoaderInvocation'] + 
| components['schemas']['SDXLRefinerModelLoaderInvocation'] + | components['schemas']['MetadataAccumulatorInvocation'] + | components['schemas']['IPAdapterInvocation'] + | components['schemas']['CompelInvocation'] + | components['schemas']['SDXLCompelPromptInvocation'] + | components['schemas']['SDXLRefinerCompelPromptInvocation'] + | components['schemas']['ClipSkipInvocation'] + | components['schemas']['SchedulerInvocation'] + | components['schemas']['CreateDenoiseMaskInvocation'] + | components['schemas']['DenoiseLatentsInvocation'] + | components['schemas']['LatentsToImageInvocation'] + | components['schemas']['ResizeLatentsInvocation'] + | components['schemas']['ScaleLatentsInvocation'] + | components['schemas']['ImageToLatentsInvocation'] + | components['schemas']['BlendLatentsInvocation'] + | components['schemas']['ONNXPromptInvocation'] + | components['schemas']['ONNXTextToLatentsInvocation'] + | components['schemas']['ONNXLatentsToImageInvocation'] + | components['schemas']['OnnxModelLoaderInvocation'] + | components['schemas']['ShowImageInvocation'] + | components['schemas']['BlankImageInvocation'] + | components['schemas']['ImageCropInvocation'] + | components['schemas']['ImagePasteInvocation'] + | components['schemas']['MaskFromAlphaInvocation'] + | components['schemas']['ImageMultiplyInvocation'] + | components['schemas']['ImageChannelInvocation'] + | components['schemas']['ImageConvertInvocation'] + | components['schemas']['ImageBlurInvocation'] + | components['schemas']['ImageResizeInvocation'] + | components['schemas']['ImageScaleInvocation'] + | components['schemas']['ImageLerpInvocation'] + | components['schemas']['ImageInverseLerpInvocation'] + | components['schemas']['ImageNSFWBlurInvocation'] + | components['schemas']['ImageWatermarkInvocation'] + | components['schemas']['MaskEdgeInvocation'] + | components['schemas']['MaskCombineInvocation'] + | components['schemas']['ColorCorrectInvocation'] + | 
components['schemas']['ImageHueAdjustmentInvocation'] + | components['schemas']['ImageChannelOffsetInvocation'] + | components['schemas']['ImageChannelMultiplyInvocation'] + | components['schemas']['SaveImageInvocation'] + | components['schemas']['DynamicPromptInvocation'] + | components['schemas']['PromptsFromFileInvocation'] + | components['schemas']['CvInpaintInvocation'] + | components['schemas']['FloatLinearRangeInvocation'] + | components['schemas']['StepParamEasingInvocation'] + | components['schemas']['AddInvocation'] + | components['schemas']['SubtractInvocation'] + | components['schemas']['MultiplyInvocation'] + | components['schemas']['DivideInvocation'] + | components['schemas']['RandomIntInvocation'] + | components['schemas']['FloatToIntegerInvocation'] + | components['schemas']['RoundInvocation'] + | components['schemas']['IntegerMathInvocation'] + | components['schemas']['FloatMathInvocation'] + | components['schemas']['NoiseInvocation'] + | components['schemas']['RangeInvocation'] + | components['schemas']['RangeOfSizeInvocation'] + | components['schemas']['RandomRangeInvocation'] + | components['schemas']['ESRGANInvocation'] + | components['schemas']['StringSplitNegInvocation'] + | components['schemas']['StringSplitInvocation'] + | components['schemas']['StringJoinInvocation'] + | components['schemas']['StringJoinThreeInvocation'] + | components['schemas']['StringReplaceInvocation'] + | components['schemas']['InfillColorInvocation'] + | components['schemas']['InfillTileInvocation'] + | components['schemas']['InfillPatchMatchInvocation'] + | components['schemas']['LaMaInfillInvocation'] + | components['schemas']['CV2InfillInvocation'] + | components['schemas']['GraphInvocation'] + | components['schemas']['IterateInvocation'] + | components['schemas']['CollectInvocation'] + | components['schemas']['CannyImageProcessorInvocation'] + | components['schemas']['HedImageProcessorInvocation'] + | components['schemas']['LineartImageProcessorInvocation'] + | 
components['schemas']['LineartAnimeImageProcessorInvocation'] + | components['schemas']['OpenposeImageProcessorInvocation'] + | components['schemas']['MidasDepthImageProcessorInvocation'] + | components['schemas']['NormalbaeImageProcessorInvocation'] + | components['schemas']['MlsdImageProcessorInvocation'] + | components['schemas']['PidiImageProcessorInvocation'] + | components['schemas']['ContentShuffleImageProcessorInvocation'] + | components['schemas']['ZoeDepthImageProcessorInvocation'] + | components['schemas']['MediapipeFaceProcessorInvocation'] + | components['schemas']['LeresImageProcessorInvocation'] + | components['schemas']['TileResamplerProcessorInvocation'] + | components['schemas']['SegmentAnythingProcessorInvocation'] + | components['schemas']['ColorMapImageProcessorInvocation']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Invalid node or link */ @@ -9962,7 +10680,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -9985,7 +10703,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Invalid node or link */ @@ -9999,7 +10717,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10018,14 +10736,14 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["Edge"]; + 
'application/json': components['schemas']['Edge']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Invalid node or link */ @@ -10039,7 +10757,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10068,7 +10786,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["GraphExecutionState"]; + 'application/json': components['schemas']['GraphExecutionState']; }; }; /** @description Invalid node or link */ @@ -10082,7 +10800,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10109,7 +10827,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description The invocation is queued */ @@ -10127,7 +10845,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10148,7 +10866,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description The invocation is canceled */ @@ -10158,7 +10876,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': 
components['schemas']['HTTPValidationError']; }; }; }; @@ -10170,20 +10888,20 @@ export type operations = { parse_dynamicprompts: { requestBody: { content: { - "application/json": components["schemas"]["Body_parse_dynamicprompts"]; + 'application/json': components['schemas']['Body_parse_dynamicprompts']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["DynamicPromptsResponse"]; + 'application/json': components['schemas']['DynamicPromptsResponse']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10196,22 +10914,22 @@ export type operations = { parameters: { query?: { /** @description Base models to include */ - base_models?: components["schemas"]["BaseModelType"][]; + base_models?: components['schemas']['BaseModelType'][]; /** @description The type of model to get */ - model_type?: components["schemas"]["ModelType"]; + model_type?: components['schemas']['ModelType']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ModelsList"]; + 'application/json': components['schemas']['ModelsList']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10224,9 +10942,9 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description The type of model */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; /** @description model name */ model_name: string; }; @@ -10243,7 +10961,7 @@ export type operations = { /** @description Validation 
Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10256,23 +10974,55 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description The type of model */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; /** @description model name */ model_name: string; }; }; requestBody: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | 
components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; responses: { /** @description The model was updated successfully */ 200: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | 
components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; /** @description Bad request */ @@ -10290,7 +11040,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10302,14 +11052,30 @@ export type operations = { import_model: { requestBody: { content: { - "application/json": components["schemas"]["Body_import_model"]; + 'application/json': components['schemas']['Body_import_model']; }; }; responses: { /** @description The model imported successfully */ 201: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | 
components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; /** @description The model could not be found */ @@ -10327,7 +11093,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; /** @description The model appeared to import successfully, but could not be found in the model manager */ @@ -10343,14 +11109,46 @@ export type operations = { add_model: { requestBody: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | 
components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; responses: { /** @description The model added successfully */ 201: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | 
components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; /** @description The model could not be found */ @@ -10364,7 +11162,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; 
/** @description The model appeared to add successfully, but could not be found in the model manager */ @@ -10385,9 +11183,9 @@ export type operations = { }; path: { /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; /** @description The type of model */ - model_type: components["schemas"]["ModelType"]; + model_type: components['schemas']['ModelType']; /** @description model name */ model_name: string; }; @@ -10396,7 +11194,23 @@ export type operations = { /** @description Model converted successfully */ 200: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] 
+ | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; /** @description Bad request */ @@ -10410,7 +11224,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10427,7 +11241,7 @@ export type operations = { /** @description Directory searched successfully */ 200: { content: { - "application/json": string[]; + 'application/json': string[]; }; }; /** @description Invalid directory path */ @@ -10437,7 +11251,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10451,7 +11265,7 @@ export type operations = { /** @description paths retrieved successfully */ 200: { content: { - "application/json": string[]; + 'application/json': string[]; }; }; }; @@ -10466,7 +11280,7 @@ export type operations = { /** @description synchronization successful */ 201: { content: { - "application/json": boolean; + 'application/json': boolean; }; }; }; @@ -10479,19 +11293,35 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components["schemas"]["BaseModelType"]; + base_model: components['schemas']['BaseModelType']; }; }; requestBody: { content: { - "application/json": 
components["schemas"]["Body_merge_models"]; + 'application/json': components['schemas']['Body_merge_models']; }; }; responses: { /** @description Model converted successfully */ 200: { content: { - "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; + 'application/json': + | components['schemas']['ONNXStableDiffusion1ModelConfig'] + | components['schemas']['StableDiffusion1ModelCheckpointConfig'] + | components['schemas']['StableDiffusion1ModelDiffusersConfig'] + | components['schemas']['VaeModelConfig'] + | components['schemas']['LoRAModelConfig'] + | components['schemas']['ControlNetModelCheckpointConfig'] + | components['schemas']['ControlNetModelDiffusersConfig'] + | components['schemas']['TextualInversionModelConfig'] + | components['schemas']['IPAdapterModelInvokeAIConfig'] + | components['schemas']['CLIPVisionModelDiffusersConfig'] + | components['schemas']['T2IAdapterModelDiffusersConfig'] + | components['schemas']['ONNXStableDiffusion2ModelConfig'] + | components['schemas']['StableDiffusion2ModelCheckpointConfig'] + | 
components['schemas']['StableDiffusion2ModelDiffusersConfig'] + | components['schemas']['StableDiffusionXLModelCheckpointConfig'] + | components['schemas']['StableDiffusionXLModelDiffusersConfig']; }; }; /** @description Incompatible models */ @@ -10505,7 +11335,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10518,7 +11348,7 @@ export type operations = { parameters: { query: { /** @description The category of the image */ - image_category: components["schemas"]["ImageCategory"]; + image_category: components['schemas']['ImageCategory']; /** @description Whether this is an intermediate image */ is_intermediate: boolean; /** @description The board to add this image to, if any */ @@ -10531,14 +11361,14 @@ export type operations = { }; requestBody: { content: { - "multipart/form-data": components["schemas"]["Body_upload_image"]; + 'multipart/form-data': components['schemas']['Body_upload_image']; }; }; responses: { /** @description The image was uploaded successfully */ 201: { content: { - "application/json": components["schemas"]["ImageDTO"]; + 'application/json': components['schemas']['ImageDTO']; }; }; /** @description Image upload failed */ @@ -10548,7 +11378,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10568,13 +11398,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImageDTO"]; + 'application/json': components['schemas']['ImageDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': 
components['schemas']['HTTPValidationError']; }; }; }; @@ -10594,13 +11424,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10618,20 +11448,20 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ImageRecordChanges"]; + 'application/json': components['schemas']['ImageRecordChanges']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImageDTO"]; + 'application/json': components['schemas']['ImageDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10645,7 +11475,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; }; @@ -10665,13 +11495,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImageMetadata"]; + 'application/json': components['schemas']['ImageMetadata']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10691,7 +11521,7 @@ export type operations = { /** @description Return the full-resolution image */ 200: { content: { - "image/png": unknown; + 'image/png': unknown; }; }; /** @description Image not found */ @@ -10701,7 +11531,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 
"application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10721,7 +11551,7 @@ export type operations = { /** @description Return the image thumbnail */ 200: { content: { - "image/webp": unknown; + 'image/webp': unknown; }; }; /** @description Image not found */ @@ -10731,7 +11561,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10751,13 +11581,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImageUrlsDTO"]; + 'application/json': components['schemas']['ImageUrlsDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10770,9 +11600,9 @@ export type operations = { parameters: { query?: { /** @description The origin of images to list. */ - image_origin?: components["schemas"]["ResourceOrigin"]; + image_origin?: components['schemas']['ResourceOrigin']; /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][]; + categories?: components['schemas']['ImageCategory'][]; /** @description Whether to list intermediate images. */ is_intermediate?: boolean; /** @description The board id to filter by. Use 'none' to find images without a board. 
*/ @@ -10787,13 +11617,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"]; + 'application/json': components['schemas']['OffsetPaginatedResults_ImageDTO_']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10802,20 +11632,20 @@ export type operations = { delete_images_from_list: { requestBody: { content: { - "application/json": components["schemas"]["Body_delete_images_from_list"]; + 'application/json': components['schemas']['Body_delete_images_from_list']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["DeleteImagesFromListResult"]; + 'application/json': components['schemas']['DeleteImagesFromListResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10824,20 +11654,20 @@ export type operations = { star_images_in_list: { requestBody: { content: { - "application/json": components["schemas"]["Body_star_images_in_list"]; + 'application/json': components['schemas']['Body_star_images_in_list']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImagesUpdatedFromListResult"]; + 'application/json': components['schemas']['ImagesUpdatedFromListResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10846,20 +11676,20 @@ export type operations = { unstar_images_in_list: { requestBody: { content: { - "application/json": 
components["schemas"]["Body_unstar_images_in_list"]; + 'application/json': components['schemas']['Body_unstar_images_in_list']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImagesUpdatedFromListResult"]; + 'application/json': components['schemas']['ImagesUpdatedFromListResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10868,20 +11698,20 @@ export type operations = { download_images_from_list: { requestBody: { content: { - "application/json": components["schemas"]["Body_download_images_from_list"]; + 'application/json': components['schemas']['Body_download_images_from_list']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ImagesDownloaded"]; + 'application/json': components['schemas']['ImagesDownloaded']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10905,13 +11735,15 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["OffsetPaginatedResults_BoardDTO_"] | components["schemas"]["BoardDTO"][]; + 'application/json': + | components['schemas']['OffsetPaginatedResults_BoardDTO_'] + | components['schemas']['BoardDTO'][]; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10931,13 +11763,13 @@ export type operations = { /** @description The board was created successfully */ 201: { content: { - "application/json": components["schemas"]["BoardDTO"]; + 
'application/json': components['schemas']['BoardDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10957,13 +11789,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["BoardDTO"]; + 'application/json': components['schemas']['BoardDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -10987,13 +11819,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["DeleteBoardResult"]; + 'application/json': components['schemas']['DeleteBoardResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11011,20 +11843,20 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BoardChanges"]; + 'application/json': components['schemas']['BoardChanges']; }; }; responses: { /** @description The board was updated successfully */ 201: { content: { - "application/json": components["schemas"]["BoardDTO"]; + 'application/json': components['schemas']['BoardDTO']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11044,13 +11876,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": string[]; + 'application/json': string[]; }; }; /** @description Validation Error */ 422: { content: { - 
"application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11062,20 +11894,20 @@ export type operations = { add_image_to_board: { requestBody: { content: { - "application/json": components["schemas"]["Body_add_image_to_board"]; + 'application/json': components['schemas']['Body_add_image_to_board']; }; }; responses: { /** @description The image was added to a board successfully */ 201: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11087,20 +11919,20 @@ export type operations = { remove_image_from_board: { requestBody: { content: { - "application/json": components["schemas"]["Body_remove_image_from_board"]; + 'application/json': components['schemas']['Body_remove_image_from_board']; }; }; responses: { /** @description The image was removed from the board successfully */ 201: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11112,20 +11944,20 @@ export type operations = { add_images_to_board: { requestBody: { content: { - "application/json": components["schemas"]["Body_add_images_to_board"]; + 'application/json': components['schemas']['Body_add_images_to_board']; }; }; responses: { /** @description Images were added to board successfully */ 201: { content: { - "application/json": components["schemas"]["AddImagesToBoardResult"]; + 'application/json': components['schemas']['AddImagesToBoardResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": 
components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11137,20 +11969,20 @@ export type operations = { remove_images_from_board: { requestBody: { content: { - "application/json": components["schemas"]["Body_remove_images_from_board"]; + 'application/json': components['schemas']['Body_remove_images_from_board']; }; }; responses: { /** @description Images were removed from board successfully */ 201: { content: { - "application/json": components["schemas"]["RemoveImagesFromBoardResult"]; + 'application/json': components['schemas']['RemoveImagesFromBoardResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11161,7 +11993,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["AppVersion"]; + 'application/json': components['schemas']['AppVersion']; }; }; }; @@ -11172,7 +12004,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["AppConfig"]; + 'application/json': components['schemas']['AppConfig']; }; }; }; @@ -11186,7 +12018,7 @@ export type operations = { /** @description The operation was successful */ 200: { content: { - "application/json": components["schemas"]["LogLevel"]; + 'application/json': components['schemas']['LogLevel']; }; }; }; @@ -11198,20 +12030,20 @@ export type operations = { set_log_level: { requestBody: { content: { - "application/json": components["schemas"]["LogLevel"]; + 'application/json': components['schemas']['LogLevel']; }; }; responses: { /** @description The operation was successful */ 200: { content: { - "application/json": components["schemas"]["LogLevel"]; + 'application/json': components['schemas']['LogLevel']; }; }; /** @description Validation 
Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11225,7 +12057,7 @@ export type operations = { /** @description The operation was successful */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; }; @@ -11239,7 +12071,7 @@ export type operations = { /** @description The operation was successful */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; }; @@ -11253,7 +12085,7 @@ export type operations = { /** @description The operation was successful */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; }; @@ -11267,7 +12099,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["InvocationCacheStatus"]; + 'application/json': components['schemas']['InvocationCacheStatus']; }; }; }; @@ -11285,26 +12117,26 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["Body_enqueue_graph"]; + 'application/json': components['schemas']['Body_enqueue_graph']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description Created */ 201: { content: { - "application/json": components["schemas"]["EnqueueGraphResult"]; + 'application/json': components['schemas']['EnqueueGraphResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11322,26 +12154,26 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["Body_enqueue_batch"]; + 'application/json': components['schemas']['Body_enqueue_batch']; }; }; responses: { /** @description 
Successful Response */ 200: { content: { - "application/json": unknown; + 'application/json': unknown; }; }; /** @description Created */ 201: { content: { - "application/json": components["schemas"]["EnqueueBatchResult"]; + 'application/json': components['schemas']['EnqueueBatchResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11356,7 +12188,12 @@ export type operations = { /** @description The number of items to fetch */ limit?: number; /** @description The status of items to fetch */ - status?: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status?: + | 'pending' + | 'in_progress' + | 'completed' + | 'failed' + | 'canceled'; /** @description The pagination cursor */ cursor?: number; /** @description The pagination cursor priority */ @@ -11371,13 +12208,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["CursorPaginatedResults_SessionQueueItemDTO_"]; + 'application/json': components['schemas']['CursorPaginatedResults_SessionQueueItemDTO_']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11397,13 +12234,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionProcessorStatus"]; + 'application/json': components['schemas']['SessionProcessorStatus']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11423,13 +12260,13 @@ export type operations = { /** @description Successful Response */ 200: { 
content: { - "application/json": components["schemas"]["SessionProcessorStatus"]; + 'application/json': components['schemas']['SessionProcessorStatus']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11447,20 +12284,20 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["Body_cancel_by_batch_ids"]; + 'application/json': components['schemas']['Body_cancel_by_batch_ids']; }; }; responses: { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["CancelByBatchIDsResult"]; + 'application/json': components['schemas']['CancelByBatchIDsResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11480,13 +12317,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["ClearResult"]; + 'application/json': components['schemas']['ClearResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11506,13 +12343,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["PruneResult"]; + 'application/json': components['schemas']['PruneResult']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11532,13 +12369,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 
"application/json": components["schemas"]["SessionQueueItem"]; + 'application/json': components['schemas']['SessionQueueItem']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11558,13 +12395,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + 'application/json': components['schemas']['SessionQueueItem']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11584,13 +12421,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueAndProcessorStatus"]; + 'application/json': components['schemas']['SessionQueueAndProcessorStatus']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11612,13 +12449,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["BatchStatus"]; + 'application/json': components['schemas']['BatchStatus']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11640,13 +12477,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + 'application/json': components['schemas']['SessionQueueItem']; }; }; /** @description Validation Error */ 422: { content: { - 
"application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; @@ -11668,13 +12505,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + 'application/json': components['schemas']['SessionQueueItem']; }; }; /** @description Validation Error */ 422: { content: { - "application/json": components["schemas"]["HTTPValidationError"]; + 'application/json': components['schemas']['HTTPValidationError']; }; }; }; diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py index e43075bd32..27b8a58bea 100644 --- a/tests/nodes/test_graph_execution_state.py +++ b/tests/nodes/test_graph_execution_state.py @@ -1,5 +1,4 @@ import logging -import threading import pytest @@ -10,20 +9,27 @@ from .test_nodes import ( # isort: split TestEventService, TextToImageTestInvocation, ) -import sqlite3 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext from invokeai.app.invocations.collections import RangeInvocation from invokeai.app.invocations.math import AddInvocation, MultiplyInvocation -from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig -from invokeai.app.services.graph import CollectInvocation, Graph, GraphExecutionState, IterateInvocation, LibraryGraph +from invokeai.app.services.config.config_default import InvokeAIAppConfig from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache -from invokeai.app.services.invocation_queue import MemoryInvocationQueue +from invokeai.app.services.invocation_processor.invocation_processor_default import DefaultInvocationProcessor +from invokeai.app.services.invocation_queue.invocation_queue_memory import MemoryInvocationQueue from invokeai.app.services.invocation_services import InvocationServices -from 
invokeai.app.services.invocation_stats import InvocationStatsService -from invokeai.app.services.processor import DefaultInvocationProcessor +from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService +from invokeai.app.services.item_storage.item_storage_sqlite import SqliteItemStorage from invokeai.app.services.session_queue.session_queue_common import DEFAULT_QUEUE_ID -from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory +from invokeai.app.services.shared.graph import ( + CollectInvocation, + Graph, + GraphExecutionState, + IterateInvocation, + LibraryGraph, +) +from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.backend.util.logging import InvokeAILogger from .test_invoker import create_edge @@ -42,29 +48,33 @@ def simple_graph(): # the test invocations. @pytest.fixture def mock_services() -> InvocationServices: - lock = threading.Lock() + configuration = InvokeAIAppConfig(use_memory_db=True, node_cache_size=0) + db = SqliteDatabase(configuration, InvokeAILogger.get_logger()) # NOTE: none of these are actually called by the test invocations - db_conn = sqlite3.connect(sqlite_memory, check_same_thread=False) - graph_execution_manager = SqliteItemStorage[GraphExecutionState]( - conn=db_conn, table_name="graph_executions", lock=lock - ) + graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions") return InvocationServices( - model_manager=None, # type: ignore - events=TestEventService(), - logger=logging, # type: ignore - images=None, # type: ignore - latents=None, # type: ignore - boards=None, # type: ignore + board_image_records=None, # type: ignore board_images=None, # type: ignore - queue=MemoryInvocationQueue(), - graph_library=SqliteItemStorage[LibraryGraph](conn=db_conn, table_name="graphs", lock=lock), + board_records=None, # type: ignore + boards=None, # type: ignore + configuration=configuration, + events=TestEventService(), 
graph_execution_manager=graph_execution_manager, - performance_statistics=InvocationStatsService(graph_execution_manager), + graph_library=SqliteItemStorage[LibraryGraph](db=db, table_name="graphs"), + image_files=None, # type: ignore + image_records=None, # type: ignore + images=None, # type: ignore + invocation_cache=MemoryInvocationCache(max_cache_size=0), + latents=None, # type: ignore + logger=logging, # type: ignore + model_manager=None, # type: ignore + names=None, # type: ignore + performance_statistics=InvocationStatsService(), processor=DefaultInvocationProcessor(), - configuration=InvokeAIAppConfig(node_cache_size=0), # type: ignore - session_queue=None, # type: ignore + queue=MemoryInvocationQueue(), session_processor=None, # type: ignore - invocation_cache=MemoryInvocationCache(), # type: ignore + session_queue=None, # type: ignore + urls=None, # type: ignore ) diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py index 7c636c3eca..105f7417cd 100644 --- a/tests/nodes/test_invoker.py +++ b/tests/nodes/test_invoker.py @@ -1,10 +1,9 @@ import logging -import sqlite3 -import threading import pytest -from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.backend.util.logging import InvokeAILogger # This import must happen before other invoke imports or test in other files(!!) 
break from .test_nodes import ( # isort: split @@ -16,15 +15,16 @@ from .test_nodes import ( # isort: split wait_until, ) -from invokeai.app.services.graph import Graph, GraphExecutionState, GraphInvocation, LibraryGraph from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache -from invokeai.app.services.invocation_queue import MemoryInvocationQueue +from invokeai.app.services.invocation_processor.invocation_processor_default import DefaultInvocationProcessor +from invokeai.app.services.invocation_queue.invocation_queue_memory import MemoryInvocationQueue from invokeai.app.services.invocation_services import InvocationServices -from invokeai.app.services.invocation_stats import InvocationStatsService +from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService from invokeai.app.services.invoker import Invoker -from invokeai.app.services.processor import DefaultInvocationProcessor +from invokeai.app.services.item_storage.item_storage_sqlite import SqliteItemStorage from invokeai.app.services.session_queue.session_queue_common import DEFAULT_QUEUE_ID -from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory +from invokeai.app.services.shared.graph import Graph, GraphExecutionState, GraphInvocation, LibraryGraph +from invokeai.app.services.shared.sqlite import SqliteDatabase @pytest.fixture @@ -52,29 +52,34 @@ def graph_with_subgraph(): # the test invocations. 
@pytest.fixture def mock_services() -> InvocationServices: - lock = threading.Lock() + db = SqliteDatabase(InvokeAIAppConfig(use_memory_db=True), InvokeAILogger.get_logger()) + configuration = InvokeAIAppConfig(use_memory_db=True, node_cache_size=0) + # NOTE: none of these are actually called by the test invocations - db_conn = sqlite3.connect(sqlite_memory, check_same_thread=False) - graph_execution_manager = SqliteItemStorage[GraphExecutionState]( - conn=db_conn, table_name="graph_executions", lock=lock - ) + graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions") return InvocationServices( - model_manager=None, # type: ignore - events=TestEventService(), - logger=logging, # type: ignore - images=None, # type: ignore - latents=None, # type: ignore - boards=None, # type: ignore + board_image_records=None, # type: ignore board_images=None, # type: ignore - queue=MemoryInvocationQueue(), - graph_library=SqliteItemStorage[LibraryGraph](conn=db_conn, table_name="graphs", lock=lock), + board_records=None, # type: ignore + boards=None, # type: ignore + configuration=configuration, + events=TestEventService(), graph_execution_manager=graph_execution_manager, - processor=DefaultInvocationProcessor(), - performance_statistics=InvocationStatsService(graph_execution_manager), - configuration=InvokeAIAppConfig(node_cache_size=0), # type: ignore - session_queue=None, # type: ignore - session_processor=None, # type: ignore + graph_library=SqliteItemStorage[LibraryGraph](db=db, table_name="graphs"), + image_files=None, # type: ignore + image_records=None, # type: ignore + images=None, # type: ignore invocation_cache=MemoryInvocationCache(max_cache_size=0), + latents=None, # type: ignore + logger=logging, # type: ignore + model_manager=None, # type: ignore + names=None, # type: ignore + performance_statistics=InvocationStatsService(), + processor=DefaultInvocationProcessor(), + queue=MemoryInvocationQueue(), + session_processor=None, 
# type: ignore + session_queue=None, # type: ignore + urls=None, # type: ignore ) diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index 4793e6ffa6..822ffc1588 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -11,8 +11,8 @@ from invokeai.app.invocations.image import ShowImageInvocation from invokeai.app.invocations.math import AddInvocation, SubtractInvocation from invokeai.app.invocations.primitives import FloatInvocation, IntegerInvocation from invokeai.app.invocations.upscale import ESRGANInvocation -from invokeai.app.services.default_graphs import create_text_to_image -from invokeai.app.services.graph import ( +from invokeai.app.services.shared.default_graphs import create_text_to_image +from invokeai.app.services.shared.graph import ( CollectInvocation, Edge, EdgeConnection, diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py index 96a5040a38..471c72a005 100644 --- a/tests/nodes/test_nodes.py +++ b/tests/nodes/test_nodes.py @@ -82,8 +82,8 @@ class PromptCollectionTestInvocation(BaseInvocation): # Importing these must happen after test invocations are defined or they won't register -from invokeai.app.services.events import EventServiceBase # noqa: E402 -from invokeai.app.services.graph import Edge, EdgeConnection # noqa: E402 +from invokeai.app.services.events.events_base import EventServiceBase # noqa: E402 +from invokeai.app.services.shared.graph import Edge, EdgeConnection # noqa: E402 def create_edge(from_id: str, from_field: str, to_id: str, to_field: str) -> Edge: diff --git a/tests/nodes/test_session_queue.py b/tests/nodes/test_session_queue.py index f28ec1ac54..6dd7c4845a 100644 --- a/tests/nodes/test_session_queue.py +++ b/tests/nodes/test_session_queue.py @@ -1,7 +1,6 @@ import pytest from pydantic import ValidationError, parse_raw_as -from invokeai.app.services.graph import Graph, GraphExecutionState, GraphInvocation from 
invokeai.app.services.session_queue.session_queue_common import ( Batch, BatchDataCollection, @@ -12,6 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( populate_graph, prepare_values_to_insert, ) +from invokeai.app.services.shared.graph import Graph, GraphExecutionState, GraphInvocation from tests.nodes.test_nodes import PromptTestInvocation diff --git a/tests/nodes/test_sqlite.py b/tests/nodes/test_sqlite.py index 002161e917..6e4da8b36e 100644 --- a/tests/nodes/test_sqlite.py +++ b/tests/nodes/test_sqlite.py @@ -1,10 +1,10 @@ -import sqlite3 -import threading - import pytest from pydantic import BaseModel, Field -from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.services.item_storage.item_storage_sqlite import SqliteItemStorage +from invokeai.app.services.shared.sqlite import SqliteDatabase +from invokeai.backend.util.logging import InvokeAILogger class TestModel(BaseModel): @@ -14,8 +14,8 @@ class TestModel(BaseModel): @pytest.fixture def db() -> SqliteItemStorage[TestModel]: - db_conn = sqlite3.connect(sqlite_memory, check_same_thread=False) - return SqliteItemStorage[TestModel](db_conn, table_name="test", id_field="id", lock=threading.Lock()) + sqlite_db = SqliteDatabase(InvokeAIAppConfig(use_memory_db=True), InvokeAILogger.get_logger()) + return SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") def test_sqlite_service_can_create_and_get(db: SqliteItemStorage[TestModel]): diff --git a/tests/test_model_manager.py b/tests/test_model_manager.py index 5a28862e1f..3e48c7ed6f 100644 --- a/tests/test_model_manager.py +++ b/tests/test_model_manager.py @@ -2,7 +2,7 @@ from pathlib import Path import pytest -from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.app.services.config.config_default import InvokeAIAppConfig from invokeai.backend import BaseModelType, ModelManager, 
ModelType, SubModelType BASIC_MODEL_NAME = ("SDXL base", BaseModelType.StableDiffusionXL, ModelType.Main)