diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 7b61887eb8..43a72943ee 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -42,7 +42,7 @@ async def upload_image( crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"), ) -> ImageDTO: """Uploads an image""" - if not file.content_type.startswith("image"): + if not file.content_type or not file.content_type.startswith("image"): raise HTTPException(status_code=415, detail="Not an image") contents = await file.read() diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index a7b1f81252..018f3af02b 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -2,11 +2,11 @@ import pathlib -from typing import List, Literal, Optional, Union +from typing import Annotated, List, Literal, Optional, Union from fastapi import Body, Path, Query, Response from fastapi.routing import APIRouter -from pydantic import BaseModel, parse_obj_as +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter from starlette.exceptions import HTTPException from invokeai.backend import BaseModelType, ModelType @@ -23,8 +23,14 @@ from ..dependencies import ApiDependencies models_router = APIRouter(prefix="/v1/models", tags=["models"]) UpdateModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +update_models_response_adapter = TypeAdapter(UpdateModelResponse) + ImportModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +import_models_response_adapter = TypeAdapter(ImportModelResponse) + ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +convert_models_response_adapter = TypeAdapter(ConvertModelResponse) + MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] @@ -32,6 +38,11 @@ ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] class ModelsList(BaseModel): models: list[Union[tuple(OPENAPI_MODEL_CONFIGS)]] + model_config = ConfigDict(use_enum_values=True) + + +models_list_adapter = TypeAdapter(ModelsList) + @models_router.get( "/", @@ -49,7 +60,7 @@ async def list_models( models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) else: models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type) - models = parse_obj_as(ModelsList, {"models": models_raw}) + models = models_list_adapter.validate_python({"models": models_raw}) return models @@ -105,11 +116,14 @@ async def update_model( info.path = new_info.get("path") # replace empty string values with None/null to avoid phenomenon of vae: '' - info_dict = info.dict() + info_dict = info.model_dump() info_dict = {x: info_dict[x] if info_dict[x] else None for x in info_dict.keys()} ApiDependencies.invoker.services.model_manager.update_model( - model_name=model_name, base_model=base_model, model_type=model_type, model_attributes=info_dict + model_name=model_name, + base_model=base_model, + model_type=model_type, + model_attributes=info_dict, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -117,7 +131,7 @@ async def update_model( base_model=base_model, model_type=model_type, ) - model_response = parse_obj_as(UpdateModelResponse, model_raw) + model_response = update_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=str(e)) except ValueError as e: @@ -159,7 +173,8 @@ async def import_model( 
    try:
        installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import(
-            items_to_import=items_to_import, prediction_type_helper=lambda x: prediction_types.get(prediction_type)
+            items_to_import=items_to_import,
+            prediction_type_helper=lambda x: prediction_types.get(prediction_type),
         )

         info = installed_models.get(location)
@@ -171,7 +186,7 @@ async def import_model(
         model_raw = ApiDependencies.invoker.services.model_manager.list_model(
             model_name=info.name, base_model=info.base_model, model_type=info.model_type
         )
-        return parse_obj_as(ImportModelResponse, model_raw)
+        return import_models_response_adapter.validate_python(model_raw)

     except ModelNotFoundException as e:
         logger.error(str(e))
@@ -205,13 +220,18 @@ async def add_model(

     try:
         ApiDependencies.invoker.services.model_manager.add_model(
-            info.model_name, info.base_model, info.model_type, model_attributes=info.dict()
+            info.model_name,
+            info.base_model,
+            info.model_type,
+            model_attributes=info.model_dump(),
         )
         logger.info(f"Successfully added {info.model_name}")
         model_raw = ApiDependencies.invoker.services.model_manager.list_model(
-            model_name=info.model_name, base_model=info.base_model, model_type=info.model_type
+            model_name=info.model_name,
+            base_model=info.base_model,
+            model_type=info.model_type,
         )
-        return parse_obj_as(ImportModelResponse, model_raw)
+        return import_models_response_adapter.validate_python(model_raw)
     except ModelNotFoundException as e:
         logger.error(str(e))
         raise HTTPException(status_code=404, detail=str(e))
@@ -223,7 +243,10 @@ async def add_model(
 @models_router.delete(
     "/{base_model}/{model_type}/{model_name}",
     operation_id="del_model",
-    responses={204: {"description": "Model deleted successfully"}, 404: {"description": "Model not found"}},
+    responses={
+        204: {"description": "Model deleted successfully"},
+        404: {"description": "Model not found"},
+    },
     status_code=204,
     response_model=None,
 )
@@ -279,7 +302,7 @@ async def convert_model(
         model_raw = ApiDependencies.invoker.services.model_manager.list_model(
             model_name, base_model=base_model, model_type=model_type
         )
-        response = parse_obj_as(ConvertModelResponse, model_raw)
+        response = convert_models_response_adapter.validate_python(model_raw)
     except ModelNotFoundException as e:
         raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}")
     except ValueError as e:
         raise HTTPException(status_code=400, detail=str(e))
@@ -302,7 +325,8 @@ async def search_for_models(
 ) -> List[pathlib.Path]:
     if not search_path.is_dir():
         raise HTTPException(
-            status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory"
+            status_code=404,
+            detail=f"The search path '{search_path}' does not exist or is not a directory",
         )
     return ApiDependencies.invoker.services.model_manager.search_for_models(search_path)
@@ -337,6 +361,26 @@ async def sync_to_config() -> bool:
     return True


+# There's some weird pydantic-fastapi behaviour that requires this to be a separate class
+# TODO: After a few updates, see if it works inside the route operation handler?
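For context on the comment above: under pydantic v2, FastAPI is happiest when a multi-field request body is a standalone `BaseModel` wrapped in `Body(embed=True)`, which is the pattern the new `MergeModelsBody` class (next hunk) follows. A minimal sketch with toy names (not the InvokeAI API):

```py
from typing import Annotated, List, Optional

from fastapi import Body, FastAPI
from pydantic import BaseModel, ConfigDict, Field

app = FastAPI()


class MergeBody(BaseModel):
    # "model_" prefixed fields clash with pydantic v2's protected namespace,
    # hence protected_namespaces=() (the diff does the same in MergeModelsBody)
    model_config = ConfigDict(protected_namespaces=())

    # pydantic v2 renames min_items/max_items to min_length/max_length
    model_names: List[str] = Field(description="model name", min_length=2, max_length=3)
    alpha: Optional[float] = Field(default=0.5, description="Alpha weighting strength")


@app.put("/merge/{base_model}")
async def merge(
    base_model: str,
    body: Annotated[MergeBody, Body(description="Model configuration", embed=True)],
) -> dict:
    # embed=True keeps the request shape {"body": {...}}, mirroring the
    # multiple-Body-parameters signature this diff replaces
    return {"merged_model_name": "+".join(body.model_names), "alpha": body.alpha}
```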
+class MergeModelsBody(BaseModel): + model_names: List[str] = Field(description="model name", min_length=2, max_length=3) + merged_model_name: Optional[str] = Field(description="Name of destination model") + alpha: Optional[float] = Field(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5) + interp: Optional[MergeInterpolationMethod] = Field(description="Interpolation method") + force: Optional[bool] = Field( + description="Force merging of models created with different versions of diffusers", + default=False, + ) + + merge_dest_directory: Optional[str] = Field( + description="Save the merged model to the designated directory (with 'merged_model_name' appended)", + default=None, + ) + + model_config = ConfigDict(protected_namespaces=()) + + @models_router.put( "/merge/{base_model}", operation_id="merge_models", @@ -349,31 +393,23 @@ async def sync_to_config() -> bool: response_model=MergeModelResponse, ) async def merge_models( + body: Annotated[MergeModelsBody, Body(description="Model configuration", embed=True)], base_model: BaseModelType = Path(description="Base model"), - model_names: List[str] = Body(description="model name", min_items=2, max_items=3), - merged_model_name: Optional[str] = Body(description="Name of destination model"), - alpha: Optional[float] = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5), - interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method"), - force: Optional[bool] = Body( - description="Force merging of models created with different versions of diffusers", default=False - ), - merge_dest_directory: Optional[str] = Body( - description="Save the merged model to the designated directory (with 'merged_model_name' appended)", - default=None, - ), ) -> MergeModelResponse: """Convert a checkpoint model into a diffusers model""" logger = ApiDependencies.invoker.services.logger try: - logger.info(f"Merging models: {model_names} into {merge_dest_directory or ''}/{merged_model_name}") - dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None + logger.info( + f"Merging models: {body.model_names} into {body.merge_dest_directory or ''}/{body.merged_model_name}" + ) + dest = pathlib.Path(body.merge_dest_directory) if body.merge_dest_directory else None result = ApiDependencies.invoker.services.model_manager.merge_models( - model_names, - base_model, - merged_model_name=merged_model_name or "+".join(model_names), - alpha=alpha, - interp=interp, - force=force, + model_names=body.model_names, + base_model=base_model, + merged_model_name=body.merged_model_name or "+".join(body.model_names), + alpha=body.alpha, + interp=body.interp, + force=body.force, merge_dest_directory=dest, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -381,9 +417,12 @@ async def merge_models( base_model=base_model, model_type=ModelType.Main, ) - response = parse_obj_as(ConvertModelResponse, model_raw) + response = convert_models_response_adapter.validate_python(model_raw) except ModelNotFoundException: - raise HTTPException(status_code=404, detail=f"One or more of the models '{model_names}' not found") + raise HTTPException( + status_code=404, + detail=f"One or more of the models '{body.model_names}' not found", + ) except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) return response diff --git a/invokeai/app/api/routers/utilities.py b/invokeai/app/api/routers/utilities.py index e664cb9070..476d10e2c0 100644 --- 
a/invokeai/app/api/routers/utilities.py +++ b/invokeai/app/api/routers/utilities.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator from fastapi import Body @@ -27,6 +27,7 @@ async def parse_dynamicprompts( combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"), ) -> DynamicPromptsResponse: """Creates a batch process""" + generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator] try: error: Optional[str] = None if combinatorial: diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fdbd64b30d..5bbd8150c1 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -22,7 +22,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from fastapi.staticfiles import StaticFiles from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware - from pydantic.schema import schema + from pydantic.json_schema import models_json_schema # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities + from .api.routers import app_info, board_images, boards, images, models, session_queue, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -51,7 +51,7 @@ mimetypes.add_type("text/css", ".css") # Create the app # TODO: create this all in a method so configuration/etc. can be passed in? 
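For reference, the `models_json_schema` import swapped in above replaces pydantic v1's `schema()` helper, and both the call signature and the result shape change. A minimal sketch with two throwaway models:

```py
from pydantic import BaseModel
from pydantic.json_schema import models_json_schema


class Foo(BaseModel):
    x: int


class Bar(BaseModel):
    foo: Foo


# v1's schema([...], ref_prefix=...) returned {"definitions": {...}}; v2's
# models_json_schema takes (model, mode) pairs and returns a 2-tuple whose
# second element holds the definitions under "$defs"
key_map, json_schema = models_json_schema(
    [(Foo, "serialization"), (Bar, "serialization")],
    ref_template="#/components/schemas/{model}",
)
for schema_key, definition in json_schema["$defs"].items():
    print(schema_key, definition["title"])  # Foo Foo / Bar Bar
```

This is why the migrated `custom_openapi()` below indexes the second tuple element and iterates `["$defs"]` where the old code iterated `["definitions"]`.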
-app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None) +app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False) # Add event handler event_handler_id: int = id(app) @@ -63,18 +63,18 @@ app.add_middleware( socket_io = SocketIO(app) +app.add_middleware( + CORSMiddleware, + allow_origins=app_config.allow_origins, + allow_credentials=app_config.allow_credentials, + allow_methods=app_config.allow_methods, + allow_headers=app_config.allow_headers, +) + # Add startup event to load dependencies @app.on_event("startup") async def startup_event(): - app.add_middleware( - CORSMiddleware, - allow_origins=app_config.allow_origins, - allow_credentials=app_config.allow_credentials, - allow_methods=app_config.allow_methods, - allow_headers=app_config.allow_headers, - ) - ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) @@ -85,12 +85,7 @@ async def shutdown_event(): # Include all routers -# TODO: REMOVE -# app.include_router( -# invocation.invocation_router, -# prefix = '/api') - -app.include_router(sessions.session_router, prefix="/api") +# app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") @@ -117,6 +112,7 @@ def custom_openapi(): description="An API for invoking AI image operations", version="1.0.0", routes=app.routes, + separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/ ) # Add all outputs @@ -127,29 +123,32 @@ def custom_openapi(): output_type = signature(invoker.invoke).return_annotation output_types.add(output_type) - output_schemas = schema(output_types, ref_prefix="#/components/schemas/") - for schema_key, output_schema in output_schemas["definitions"].items(): - output_schema["class"] = "output" - openapi_schema["components"]["schemas"][schema_key] = output_schema - + output_schemas = models_json_schema( + models=[(o, "serialization") for o in output_types], ref_template="#/components/schemas/{model}" + ) + for schema_key, output_schema in output_schemas[1]["$defs"].items(): # TODO: note that we assume the schema_key here is the TYPE.__name__ # This could break in some cases, figure out a better way to do it output_type_titles[schema_key] = output_schema["title"] # Add Node Editor UI helper schemas - ui_config_schemas = schema([UIConfigBase, _InputField, _OutputField], ref_prefix="#/components/schemas/") - for schema_key, ui_config_schema in ui_config_schemas["definitions"].items(): + ui_config_schemas = models_json_schema( + [(UIConfigBase, "serialization"), (_InputField, "serialization"), (_OutputField, "serialization")], + ref_template="#/components/schemas/{model}", + ) + for schema_key, ui_config_schema in ui_config_schemas[1]["$defs"].items(): openapi_schema["components"]["schemas"][schema_key] = ui_config_schema # Add a reference to the output type to additionalProperties of the invoker schema for invoker in all_invocations: invoker_name = invoker.__name__ - output_type = signature(invoker.invoke).return_annotation + output_type = signature(obj=invoker.invoke).return_annotation output_type_title = output_type_titles[output_type.__name__] - invoker_schema = openapi_schema["components"]["schemas"][invoker_name] + invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"] outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"} invoker_schema["output"] = outputs_ref invoker_schema["class"] = "invocation" + 
openapi_schema["components"]["schemas"][f"{output_type_title}"]["class"] = "output" from invokeai.backend.model_management.models import get_model_config_enums @@ -172,7 +171,7 @@ def custom_openapi(): return app.openapi_schema -app.openapi = custom_openapi +app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid assignment # Override API doc favicons app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], "static/dream_web")), name="static") diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py index b000abcf6a..c21c6315ed 100644 --- a/invokeai/app/cli/commands.py +++ b/invokeai/app/cli/commands.py @@ -24,8 +24,8 @@ def add_field_argument(command_parser, name: str, field, default_override=None): if field.default_factory is None else field.default_factory() ) - if get_origin(field.type_) == Literal: - allowed_values = get_args(field.type_) + if get_origin(field.annotation) == Literal: + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -38,15 +38,15 @@ def add_field_argument(command_parser, name: str, field, default_override=None): type=field_type, default=default, choices=allowed_values, - help=field.field_info.description, + help=field.description, ) else: command_parser.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - help=field.field_info.description, + help=field.description, ) @@ -142,7 +142,6 @@ class BaseCommand(ABC, BaseModel): """A CLI command""" # All commands must include a type name like this: - # type: Literal['your_command_name'] = 'your_command_name' @classmethod def get_all_subclasses(cls): diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index d82b94d0e9..8bd4a89f45 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -7,28 +7,16 @@ import re from abc import ABC, abstractmethod from enum import Enum from inspect import signature -from typing import ( - TYPE_CHECKING, - AbstractSet, - Any, - Callable, - ClassVar, - Literal, - Mapping, - Optional, - Type, - TypeVar, - Union, - get_args, - get_type_hints, -) +from types import UnionType +from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union import semver -from pydantic import BaseModel, Field, validator -from pydantic.fields import ModelField, Undefined -from pydantic.typing import NoArgAnyCallable +from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator +from pydantic.fields import _Unset +from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.util.misc import uuid_string if TYPE_CHECKING: from ..services.invocation_services import InvocationServices @@ -211,6 +199,11 @@ class _InputField(BaseModel): ui_choice_labels: Optional[dict[str, str]] item_default: Optional[Any] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + class _OutputField(BaseModel): """ @@ -224,34 +217,36 @@ class _OutputField(BaseModel): ui_type: Optional[UIType] ui_order: Optional[int] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + + +def get_type(klass: BaseModel) -> str: + """Helper function to get an invocation or invocation output's type. 
This is the default value of the `type` field.""" + return klass.model_fields["type"].default + def InputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom input: Input = Input.Any, ui_type: Optional[UIType] = None, ui_component: Optional[UIComponent] = None, @@ -259,7 +254,6 @@ def InputField( ui_order: Optional[int] = None, ui_choice_labels: Optional[dict[str, str]] = None, item_default: Optional[Any] = None, - **kwargs: Any, ) -> Any: """ Creates an input field for an invocation. @@ -289,18 +283,26 @@ def InputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ : param bool item_default: [None] Specifies the default item value, if this is a collection input. \ - Ignored for non-collection fields.. + Ignored for non-collection fields. """ - return Field( - *args, + + json_schema_extra_: dict[str, Any] = dict( + input=input, + ui_type=ui_type, + ui_component=ui_component, + ui_hidden=ui_hidden, + ui_order=ui_order, + item_default=item_default, + ui_choice_labels=ui_choice_labels, + ) + + field_args = dict( default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -309,57 +311,92 @@ def InputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - input=input, - ui_type=ui_type, - ui_component=ui_component, - ui_hidden=ui_hidden, - ui_order=ui_order, - item_default=item_default, - ui_choice_labels=ui_choice_labels, - **kwargs, + ) + + """ + Invocation definitions have their fields typed correctly for their `invoke()` functions. 
+    This typing is often more specific than the actual invocation definition requires, because
+    fields may have values provided only by connections.
+
+    For example, consider a ResizeImageInvocation with an `image: ImageField` field.
+
+    `image` is required during the call to `invoke()`, but when the python class is instantiated,
+    the field may not be present. This is fine, because that image field will be provided by
+    an ancestor node that outputs the image.
+
+    So we'd like to type that `image` field as `Optional[ImageField]`. If we do that, however, then
+    we need to handle a lot of extra logic in the `invoke()` function to check if the field has a
+    value or not. This is very tedious.
+
+    Ideally, the invocation definition would be able to specify that the field is required during
+    invocation, but optional during instantiation. So the field would be typed as `image: ImageField`,
+    but when calling the `invoke()` function, we raise an error if the field is not present.
+
+    To do this, we need to do a bit of finagling to make the pydantic field optional, and then do
+    extra validation when calling `invoke()`.
+
+    There is some additional logic here to cleanly create the pydantic field via the wrapper.
+    """
+
+    # Filter out field args not provided
+    provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined}
+
+    if (default is not PydanticUndefined) and (default_factory is not PydanticUndefined):
+        raise ValueError("Cannot specify both default and default_factory")
+
+    # because we are manually making fields optional, we need to store the original required bool for reference later
+    if default is PydanticUndefined and default_factory is PydanticUndefined:
+        json_schema_extra_.update(dict(orig_required=True))
+    else:
+        json_schema_extra_.update(dict(orig_required=False))
+
+    # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
+    if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined:
+        default_ = None if default is PydanticUndefined else default
+        provided_args.update(dict(default=default_))
+        if default is not PydanticUndefined:
+            # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value
+            json_schema_extra_.update(dict(default=default))
+            json_schema_extra_.update(dict(orig_default=default))
+    elif default is not PydanticUndefined and default_factory is PydanticUndefined:
+        default_ = default
+        provided_args.update(dict(default=default_))
+        json_schema_extra_.update(dict(orig_default=default_))
+    elif default_factory is not PydanticUndefined:
+        provided_args.update(dict(default_factory=default_factory))
+        # TODO: cannot serialize default_factory...
+ # json_schema_extra_.update(dict(orig_default_factory=default_factory)) + + return Field( + **provided_args, + json_schema_extra=json_schema_extra_, ) def OutputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom ui_type: Optional[UIType] = None, ui_hidden: bool = False, ui_order: Optional[int] = None, - **kwargs: Any, ) -> Any: """ Creates an output field for an invocation output. @@ -379,15 +416,12 @@ def OutputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ """ return Field( - *args, default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -396,19 +430,13 @@ def OutputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - ui_type=ui_type, - ui_hidden=ui_hidden, - ui_order=ui_order, - **kwargs, + json_schema_extra=dict( + ui_type=ui_type, + ui_hidden=ui_hidden, + ui_order=ui_order, + ), ) @@ -422,7 +450,13 @@ class UIConfigBase(BaseModel): title: Optional[str] = Field(default=None, description="The node's display name") category: Optional[str] = Field(default=None, description="The node's category") version: Optional[str] = Field( - default=None, description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".' + default=None, + description='The node\'s version. Should be a valid semver string e.g. 
"1.0.0" or "3.8.13".', + ) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, ) @@ -457,23 +491,38 @@ class BaseInvocationOutput(BaseModel): All invocation outputs must use the `@invocation_output` decorator to provide their unique type. """ - @classmethod - def get_all_subclasses_tuple(cls): - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - return tuple(subclasses) + _output_classes: ClassVar[set[BaseInvocationOutput]] = set() - class Config: - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type"]) + @classmethod + def register_output(cls, output: BaseInvocationOutput) -> None: + cls._output_classes.add(output) + + @classmethod + def get_outputs(cls) -> Iterable[BaseInvocationOutput]: + return cls._output_classes + + @classmethod + def get_outputs_union(cls) -> UnionType: + outputs_union = Union[tuple(cls._output_classes)] # type: ignore [valid-type] + return outputs_union # type: ignore [return-value] + + @classmethod + def get_output_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs()) + + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Because we use a pydantic Literal field with default value for the invocation type, + # it will be typed as optional in the OpenAPI schema. Make it required manually. + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type"]) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + json_schema_extra=json_schema_extra, + ) class RequiredConnectionException(Exception): @@ -498,104 +547,91 @@ class BaseInvocation(ABC, BaseModel): All invocations must use the `@invocation` decorator to provide their unique type. 
""" + _invocation_classes: ClassVar[set[BaseInvocation]] = set() + @classmethod - def get_all_subclasses(cls): + def register_invocation(cls, invocation: BaseInvocation) -> None: + cls._invocation_classes.add(invocation) + + @classmethod + def get_invocations_union(cls) -> UnionType: + invocations_union = Union[tuple(cls._invocation_classes)] # type: ignore [valid-type] + return invocations_union # type: ignore [return-value] + + @classmethod + def get_invocations(cls) -> Iterable[BaseInvocation]: app_config = InvokeAIAppConfig.get_config() - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - allowed_invocations = [] - for sc in subclasses: + allowed_invocations: set[BaseInvocation] = set() + for sc in cls._invocation_classes: + invocation_type = get_type(sc) is_in_allowlist = ( - sc.__fields__.get("type").default in app_config.allow_nodes - if isinstance(app_config.allow_nodes, list) - else True + invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True ) - is_in_denylist = ( - sc.__fields__.get("type").default in app_config.deny_nodes - if isinstance(app_config.deny_nodes, list) - else False + invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False ) - if is_in_allowlist and not is_in_denylist: - allowed_invocations.append(sc) + allowed_invocations.add(sc) return allowed_invocations @classmethod - def get_invocations(cls): - return tuple(BaseInvocation.get_all_subclasses()) - - @classmethod - def get_invocations_map(cls): + def get_invocations_map(cls) -> dict[str, BaseInvocation]: # Get the type strings out of the literals and into a dictionary return dict( map( - lambda t: (get_args(get_type_hints(t)["type"])[0], t), - BaseInvocation.get_all_subclasses(), + lambda i: (get_type(i), i), + BaseInvocation.get_invocations(), ) ) @classmethod - def get_output_type(cls): + def get_invocation_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocation.get_invocations()) + + @classmethod + def get_output_type(cls) -> BaseInvocationOutput: return signature(cls.invoke).return_annotation - class Config: - validate_assignment = True - validate_all = True - - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - uiconfig = getattr(model_class, "UIConfig", None) - if uiconfig and hasattr(uiconfig, "title"): - schema["title"] = uiconfig.title - if uiconfig and hasattr(uiconfig, "tags"): - schema["tags"] = uiconfig.tags - if uiconfig and hasattr(uiconfig, "category"): - schema["category"] = uiconfig.category - if uiconfig and hasattr(uiconfig, "version"): - schema["version"] = uiconfig.version - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type", "id"]) + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Add the various UI-facing attributes to the schema. These are used to build the invocation templates. 
+ uiconfig = getattr(model_class, "UIConfig", None) + if uiconfig and hasattr(uiconfig, "title"): + schema["title"] = uiconfig.title + if uiconfig and hasattr(uiconfig, "tags"): + schema["tags"] = uiconfig.tags + if uiconfig and hasattr(uiconfig, "category"): + schema["category"] = uiconfig.category + if uiconfig and hasattr(uiconfig, "version"): + schema["version"] = uiconfig.version + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type", "id"]) @abstractmethod def invoke(self, context: InvocationContext) -> BaseInvocationOutput: """Invoke with provided context and return outputs.""" pass - def __init__(self, **data): - # nodes may have required fields, that can accept input from connections - # on instantiation of the model, we need to exclude these from validation - restore = dict() - try: - field_names = list(self.__fields__.keys()) - for field_name in field_names: - # if the field is required and may get its value from a connection, exclude it from validation - field = self.__fields__[field_name] - _input = field.field_info.extra.get("input", None) - if _input in [Input.Connection, Input.Any] and field.required: - if field_name not in data: - restore[field_name] = self.__fields__.pop(field_name) - # instantiate the node, which will validate the data - super().__init__(**data) - finally: - # restore the removed fields - for field_name, field in restore.items(): - self.__fields__[field_name] = field - def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput: - for field_name, field in self.__fields__.items(): - _input = field.field_info.extra.get("input", None) - if field.required and not hasattr(self, field_name): - if _input == Input.Connection: - raise RequiredConnectionException(self.__fields__["type"].default, field_name) - elif _input == Input.Any: - raise MissingInputException(self.__fields__["type"].default, field_name) + for field_name, field in self.model_fields.items(): + if not field.json_schema_extra or callable(field.json_schema_extra): + # something has gone terribly awry, we should always have this and it should be a dict + continue + + # Here we handle the case where the field is optional in the pydantic class, but required + # in the `invoke()` method. + + orig_default = field.json_schema_extra.get("orig_default", PydanticUndefined) + orig_required = field.json_schema_extra.get("orig_required", True) + input_ = field.json_schema_extra.get("input", None) + if orig_default is not PydanticUndefined and not hasattr(self, field_name): + setattr(self, field_name, orig_default) + if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None: + if input_ == Input.Connection: + raise RequiredConnectionException(self.model_fields["type"].default, field_name) + elif input_ == Input.Any: + raise MissingInputException(self.model_fields["type"].default, field_name) # skip node cache codepath if it's disabled if context.services.configuration.node_cache_size == 0: @@ -618,23 +654,31 @@ class BaseInvocation(ABC, BaseModel): return self.invoke(context) def get_type(self) -> str: - return self.__fields__["type"].default + return self.model_fields["type"].default id: str = Field( - description="The id of this instance of an invocation. Must be unique among all instances of invocations." + default_factory=uuid_string, + description="The id of this instance of an invocation. 
Must be unique among all instances of invocations.", ) - is_intermediate: bool = InputField( - default=False, description="Whether or not this is an intermediate invocation.", ui_type=UIType.IsIntermediate + is_intermediate: Optional[bool] = Field( + default=False, + description="Whether or not this is an intermediate invocation.", + json_schema_extra=dict(ui_type=UIType.IsIntermediate), ) - workflow: Optional[str] = InputField( + workflow: Optional[str] = Field( default=None, description="The workflow to save with the image", - ui_type=UIType.WorkflowField, + json_schema_extra=dict(ui_type=UIType.WorkflowField), + ) + use_cache: Optional[bool] = Field( + default=True, + description="Whether or not to use the cache", ) - use_cache: bool = InputField(default=True, description="Whether or not to use the cache") - @validator("workflow", pre=True) + @field_validator("workflow", mode="before") + @classmethod def validate_workflow_is_json(cls, v): + """We don't have a workflow schema in the backend, so we just check that it's valid JSON""" if v is None: return None try: @@ -645,8 +689,14 @@ class BaseInvocation(ABC, BaseModel): UIConfig: ClassVar[Type[UIConfigBase]] + model_config = ConfigDict( + validate_assignment=True, + json_schema_extra=json_schema_extra, + json_schema_serialization_defaults_required=True, + ) -GenericBaseInvocation = TypeVar("GenericBaseInvocation", bound=BaseInvocation) + +TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation) def invocation( @@ -656,7 +706,7 @@ def invocation( category: Optional[str] = None, version: Optional[str] = None, use_cache: Optional[bool] = True, -) -> Callable[[Type[GenericBaseInvocation]], Type[GenericBaseInvocation]]: +) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]: """ Adds metadata to an invocation. @@ -668,12 +718,15 @@ def invocation( :param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor. """ - def wrapper(cls: Type[GenericBaseInvocation]) -> Type[GenericBaseInvocation]: + def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: # Validate invocation types on creation of invocation classes # TODO: ensure unique? if re.compile(r"^\S+$").match(invocation_type) is None: raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"') + if invocation_type in BaseInvocation.get_invocation_types(): + raise ValueError(f'Invocation type "{invocation_type}" already exists') + # Add OpenAPI schema extras uiconf_name = cls.__qualname__ + ".UIConfig" if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name: @@ -691,59 +744,83 @@ def invocation( raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e cls.UIConfig.version = version if use_cache is not None: - cls.__fields__["use_cache"].default = use_cache + cls.model_fields["use_cache"].default = use_cache + + # Add the invocation type to the model. + + # You'd be tempted to just add the type field and rebuild the model, like this: + # cls.model_fields.update(type=FieldInfo.from_annotated_attribute(Literal[invocation_type], invocation_type)) + # cls.model_rebuild() or cls.model_rebuild(force=True) + + # Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does + # not work. Instead, we have to create a new class with the type field and patch the original class with it. 
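A stripped-down sketch of that `create_model` patching approach, using a toy model rather than the real invocation classes:

```py
from typing import Literal

from pydantic import BaseModel, Field, create_model


class ExampleInvocation(BaseModel):
    """Docstring that create_model would otherwise drop."""

    image_name: str = "example.png"


docstring = ExampleInvocation.__doc__
# Subclass the original class and graft on a Literal "type" field with a default
ExamplePatched = create_model(
    ExampleInvocation.__qualname__,
    __base__=ExampleInvocation,
    __module__=ExampleInvocation.__module__,
    type=(Literal["example"], Field(title="type", default="example")),
)
ExamplePatched.__doc__ = docstring

print(ExamplePatched().type)                        # -> "example"
print(ExamplePatched.model_fields["type"].default)  # -> "example"
```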
-        # Add the invocation type to the pydantic model of the invocation
         invocation_type_annotation = Literal[invocation_type]  # type: ignore
-        invocation_type_field = ModelField.infer(
-            name="type",
-            value=invocation_type,
-            annotation=invocation_type_annotation,
-            class_validators=None,
-            config=cls.__config__,
+        invocation_type_field = Field(
+            title="type",
+            default=invocation_type,
         )
-        cls.__fields__.update({"type": invocation_type_field})
-        # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html
-        if annotations := cls.__dict__.get("__annotations__", None):
-            annotations.update({"type": invocation_type_annotation})
+
+        docstring = cls.__doc__
+        cls = create_model(
+            cls.__qualname__,
+            __base__=cls,
+            __module__=cls.__module__,
+            type=(invocation_type_annotation, invocation_type_field),
+        )
+        cls.__doc__ = docstring
+
+        # TODO: how to type this correctly? it's typed as ModelMetaclass, a private class in pydantic
+        BaseInvocation.register_invocation(cls)  # type: ignore
+
         return cls

     return wrapper


-GenericBaseInvocationOutput = TypeVar("GenericBaseInvocationOutput", bound=BaseInvocationOutput)
+TBaseInvocationOutput = TypeVar("TBaseInvocationOutput", bound=BaseInvocationOutput)


 def invocation_output(
     output_type: str,
-) -> Callable[[Type[GenericBaseInvocationOutput]], Type[GenericBaseInvocationOutput]]:
+) -> Callable[[Type[TBaseInvocationOutput]], Type[TBaseInvocationOutput]]:
     """
     Adds metadata to an invocation output.

     :param str output_type: The type of the invocation output. Must be unique among all invocation outputs.
     """

-    def wrapper(cls: Type[GenericBaseInvocationOutput]) -> Type[GenericBaseInvocationOutput]:
+    def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]:
         # Validate output types on creation of invocation output classes
         # TODO: ensure unique?
         if re.compile(r"^\S+$").match(output_type) is None:
             raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"')

-        # Add the output type to the pydantic model of the invocation output
-        output_type_annotation = Literal[output_type]  # type: ignore
-        output_type_field = ModelField.infer(
-            name="type",
-            value=output_type,
-            annotation=output_type_annotation,
-            class_validators=None,
-            config=cls.__config__,
-        )
-        cls.__fields__.update({"type": output_type_field})
+        if output_type in BaseInvocationOutput.get_output_types():
+            raise ValueError(f'Output type "{output_type}" already exists')

-        # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html
-        if annotations := cls.__dict__.get("__annotations__", None):
-            annotations.update({"type": output_type_annotation})
+        # Add the output type to the model.
+
+        output_type_annotation = Literal[output_type]  # type: ignore
+        output_type_field = Field(
+            title="type",
+            default=output_type,
+        )
+
+        docstring = cls.__doc__
+        cls = create_model(
+            cls.__qualname__,
+            __base__=cls,
+            __module__=cls.__module__,
+            type=(output_type_annotation, output_type_field),
+        )
+        cls.__doc__ = docstring
+
+        BaseInvocationOutput.register_output(cls)  # type: ignore # TODO: how to type this correctly?
return cls return wrapper + + +GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py index 83863422f8..f26eebe1ff 100644 --- a/invokeai/app/invocations/collections.py +++ b/invokeai/app/invocations/collections.py @@ -2,7 +2,7 @@ import numpy as np -from pydantic import validator +from pydantic import ValidationInfo, field_validator from invokeai.app.invocations.primitives import IntegerCollectionOutput from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -20,9 +20,9 @@ class RangeInvocation(BaseInvocation): stop: int = InputField(default=10, description="The stop of the range") step: int = InputField(default=1, description="The step of the range") - @validator("stop") - def stop_gt_start(cls, v, values): - if "start" in values and v <= values["start"]: + @field_validator("stop") + def stop_gt_start(cls, v: int, info: ValidationInfo): + if "start" in info.data and v <= info.data["start"]: raise ValueError("stop must be greater than start") return v diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index b2634c2c56..b3ebc92320 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -1,6 +1,6 @@ import re from dataclasses import dataclass -from typing import List, Union +from typing import List, Optional, Union import torch from compel import Compel, ReturnedEmbeddingsType @@ -43,7 +43,13 @@ class ConditioningFieldData: # PerpNeg = "perp_neg" -@invocation("compel", title="Prompt", tags=["prompt", "compel"], category="conditioning", version="1.0.0") +@invocation( + "compel", + title="Prompt", + tags=["prompt", "compel"], + category="conditioning", + version="1.0.0", +) class CompelInvocation(BaseInvocation): """Parse prompt using compel package to conditioning.""" @@ -61,17 +67,19 @@ class CompelInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), context=context, ) def _lora_loader(): for lora in self.clip.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -160,11 +168,11 @@ class SDXLPromptInvocationBase: zero_on_empty: bool, ): tokenizer_info = context.services.model_manager.get_model( - **clip_field.tokenizer.dict(), + **clip_field.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **clip_field.text_encoder.dict(), + **clip_field.text_encoder.model_dump(), context=context, ) @@ -172,7 +180,11 @@ class SDXLPromptInvocationBase: if prompt == "" and zero_on_empty: cpu_text_encoder = text_encoder_info.context.model c = torch.zeros( - (1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size), + ( + 1, + cpu_text_encoder.config.max_position_embeddings, + cpu_text_encoder.config.hidden_size, + ), dtype=text_encoder_info.context.cache.precision, ) if get_pooled: @@ -186,7 +198,9 @@ class SDXLPromptInvocationBase: def _lora_loader(): for lora in 
clip_field.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -273,8 +287,16 @@ class SDXLPromptInvocationBase: class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): """Parse prompt using compel package to conditioning.""" - prompt: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) - style: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) + prompt: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) + style: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") crop_top: int = InputField(default=0, description="") @@ -310,7 +332,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c1, torch.zeros( - (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype + (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), + device=c1.device, + dtype=c1.dtype, ), ], dim=1, @@ -321,7 +345,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c2, torch.zeros( - (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype + (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), + device=c2.device, + dtype=c2.dtype, ), ], dim=1, @@ -359,7 +385,9 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase """Parse prompt using compel package to conditioning.""" style: str = InputField( - default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, ) # TODO: ? 
original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") @@ -403,10 +431,16 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase class ClipSkipInvocationOutput(BaseInvocationOutput): """Clip skip node output""" - clip: ClipField = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") + clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") -@invocation("clip_skip", title="CLIP Skip", tags=["clipskip", "clip", "skip"], category="conditioning", version="1.0.0") +@invocation( + "clip_skip", + title="CLIP Skip", + tags=["clipskip", "clip", "skip"], + category="conditioning", + version="1.0.0", +) class ClipSkipInvocation(BaseInvocation): """Skip layers in clip text_encoder model.""" @@ -421,7 +455,9 @@ class ClipSkipInvocation(BaseInvocation): def get_max_token_count( - tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False + tokenizer, + prompt: Union[FlattenedPrompt, Blend, Conjunction], + truncate_if_too_long=False, ) -> int: if type(prompt) is Blend: blend: Blend = prompt diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 59a36935df..200c37d851 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -2,7 +2,7 @@ # initial implementation by Gregg Helt, 2023 # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux from builtins import bool, float -from typing import Dict, List, Literal, Optional, Union +from typing import Dict, List, Literal, Union import cv2 import numpy as np @@ -24,7 +24,7 @@ from controlnet_aux import ( ) from controlnet_aux.util import HWC3, ade_palette from PIL import Image -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin @@ -57,6 +57,8 @@ class ControlNetModelField(BaseModel): model_name: str = Field(description="Name of the ControlNet model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class ControlField(BaseModel): image: ImageField = Field(description="The control image") @@ -71,7 +73,7 @@ class ControlField(BaseModel): control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use") resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use") - @validator("control_weight") + @field_validator("control_weight") def validate_control_weight(cls, v): """Validate that all control weights in the valid range""" if isinstance(v, list): @@ -124,9 +126,7 @@ class ControlNetInvocation(BaseInvocation): ) -@invocation( - "image_processor", title="Base Image Processor", tags=["controlnet"], category="controlnet", version="1.0.0" -) +# This invocation exists for other invocations to subclass it - do not register with @invocation! 
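Since discovery now happens through the explicit registry populated by `@invocation` (see `register_invocation` earlier in this diff) rather than by walking `__subclasses__()`, leaving a base class undecorated is how it is excluded, as the comment above notes. A toy sketch of the pattern, with hypothetical names:

```py
from typing import Callable, Type, TypeVar

T = TypeVar("T")
REGISTRY: dict[str, type] = {}


def invocation(invocation_type: str) -> Callable[[Type[T]], Type[T]]:
    def wrapper(cls: Type[T]) -> Type[T]:
        if invocation_type in REGISTRY:
            raise ValueError(f'Invocation type "{invocation_type}" already exists')
        REGISTRY[invocation_type] = cls
        return cls

    return wrapper


class ImageProcessorBase:
    """Subclass-only base: never decorated, so it never enters the registry."""


@invocation("canny_image_processor")
class CannyProcessor(ImageProcessorBase):
    pass


print("canny_image_processor" in REGISTRY)                          # -> True
print(any(cls is ImageProcessorBase for cls in REGISTRY.values()))  # -> False
```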
class ImageProcessorInvocation(BaseInvocation): """Base class for invocations that preprocess images for ControlNet""" @@ -393,9 +393,9 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) - h: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `h` parameter") - w: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `w` parameter") - f: Optional[int] = InputField(default=256, ge=0, description="Content shuffle `f` parameter") + h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter") + w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter") + f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter") def run_processor(self, image): content_shuffle_processor = ContentShuffleDetector() @@ -575,14 +575,14 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation): def run_processor(self, image: Image.Image): image = image.convert("RGB") - image = np.array(image, dtype=np.uint8) - height, width = image.shape[:2] + np_image = np.array(image, dtype=np.uint8) + height, width = np_image.shape[:2] width_tile_size = min(self.color_map_tile_size, width) height_tile_size = min(self.color_map_tile_size, height) color_map = cv2.resize( - image, + np_image, (width // width_tile_size, height // height_tile_size), interpolation=cv2.INTER_CUBIC, ) diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index 31ab77bd1a..40e15e9476 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -8,7 +8,7 @@ import numpy as np from mediapipe.python.solutions.face_mesh import FaceMesh # type: ignore[import] from PIL import Image, ImageDraw, ImageFilter, ImageFont, ImageOps from PIL.Image import Image as ImageType -from pydantic import validator +from pydantic import field_validator import invokeai.assets.fonts as font_assets from invokeai.app.invocations.baseinvocation import ( @@ -550,7 +550,7 @@ class FaceMaskInvocation(BaseInvocation): ) invert_mask: bool = InputField(default=False, description="Toggle to invert the mask") - @validator("face_ids") + @field_validator("face_ids") def validate_comma_separated_ints(cls, v) -> str: comma_separated_ints_regex = re.compile(r"^\d*(,\d+)*$") if comma_separated_ints_regex.match(v) is None: diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 2d59a567c0..3a4f4eadac 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -36,7 +36,13 @@ class ShowImageInvocation(BaseInvocation): ) -@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0") +@invocation( + "blank_image", + title="Blank Image", + tags=["image"], + category="image", + version="1.0.0", +) class BlankImageInvocation(BaseInvocation): """Creates a blank image and forwards it to the pipeline""" @@ -65,7 +71,13 @@ class BlankImageInvocation(BaseInvocation): ) -@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0") +@invocation( + "img_crop", + title="Crop Image", + tags=["image", "crop"], + category="image", + version="1.0.0", +) class ImageCropInvocation(BaseInvocation): """Crops an image to a specified box. 
The box can be outside of the image.""" @@ -98,7 +110,13 @@ class ImageCropInvocation(BaseInvocation): ) -@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1") +@invocation( + "img_paste", + title="Paste Image", + tags=["image", "paste"], + category="image", + version="1.0.1", +) class ImagePasteInvocation(BaseInvocation): """Pastes an image into another image.""" @@ -151,7 +169,13 @@ class ImagePasteInvocation(BaseInvocation): ) -@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0") +@invocation( + "tomask", + title="Mask from Alpha", + tags=["image", "mask"], + category="image", + version="1.0.0", +) class MaskFromAlphaInvocation(BaseInvocation): """Extracts the alpha channel of an image as a mask.""" @@ -182,7 +206,13 @@ class MaskFromAlphaInvocation(BaseInvocation): ) -@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0") +@invocation( + "img_mul", + title="Multiply Images", + tags=["image", "multiply"], + category="image", + version="1.0.0", +) class ImageMultiplyInvocation(BaseInvocation): """Multiplies two images together using `PIL.ImageChops.multiply()`.""" @@ -215,7 +245,13 @@ class ImageMultiplyInvocation(BaseInvocation): IMAGE_CHANNELS = Literal["A", "R", "G", "B"] -@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0") +@invocation( + "img_chan", + title="Extract Image Channel", + tags=["image", "channel"], + category="image", + version="1.0.0", +) class ImageChannelInvocation(BaseInvocation): """Gets a channel from an image.""" @@ -247,7 +283,13 @@ class ImageChannelInvocation(BaseInvocation): IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] -@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0") +@invocation( + "img_conv", + title="Convert Image Mode", + tags=["image", "convert"], + category="image", + version="1.0.0", +) class ImageConvertInvocation(BaseInvocation): """Converts an image to a different mode.""" @@ -276,7 +318,13 @@ class ImageConvertInvocation(BaseInvocation): ) -@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0") +@invocation( + "img_blur", + title="Blur Image", + tags=["image", "blur"], + category="image", + version="1.0.0", +) class ImageBlurInvocation(BaseInvocation): """Blurs an image""" @@ -330,7 +378,13 @@ PIL_RESAMPLING_MAP = { } -@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0") +@invocation( + "img_resize", + title="Resize Image", + tags=["image", "resize"], + category="image", + version="1.0.0", +) class ImageResizeInvocation(BaseInvocation): """Resizes an image to specific dimensions""" @@ -359,7 +413,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -370,7 +424,13 @@ class ImageResizeInvocation(BaseInvocation): ) -@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0") +@invocation( + "img_scale", + title="Scale Image", + tags=["image", "scale"], + category="image", + version="1.0.0", +) class 
ImageScaleInvocation(BaseInvocation): """Scales an image by a factor""" @@ -411,7 +471,13 @@ class ImageScaleInvocation(BaseInvocation): ) -@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0") +@invocation( + "img_lerp", + title="Lerp Image", + tags=["image", "lerp"], + category="image", + version="1.0.0", +) class ImageLerpInvocation(BaseInvocation): """Linear interpolation of all pixels of an image""" @@ -444,7 +510,13 @@ class ImageLerpInvocation(BaseInvocation): ) -@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0") +@invocation( + "img_ilerp", + title="Inverse Lerp Image", + tags=["image", "ilerp"], + category="image", + version="1.0.0", +) class ImageInverseLerpInvocation(BaseInvocation): """Inverse linear interpolation of all pixels of an image""" @@ -456,7 +528,7 @@ class ImageInverseLerpInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) image_arr = numpy.asarray(image, dtype=numpy.float32) - image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 + image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 # type: ignore [assignment] ilerp_image = Image.fromarray(numpy.uint8(image_arr)) @@ -477,7 +549,13 @@ class ImageInverseLerpInvocation(BaseInvocation): ) -@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0") +@invocation( + "img_nsfw", + title="Blur NSFW Image", + tags=["image", "nsfw"], + category="image", + version="1.0.0", +) class ImageNSFWBlurInvocation(BaseInvocation): """Add blur to NSFW-flagged images""" @@ -505,7 +583,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -515,7 +593,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): height=image_dto.height, ) - def _get_caution_img(self) -> Image: + def _get_caution_img(self) -> Image.Image: import invokeai.app.assets.images as image_assets caution = Image.open(Path(image_assets.__path__[0]) / "caution.png") @@ -523,7 +601,11 @@ class ImageNSFWBlurInvocation(BaseInvocation): @invocation( - "img_watermark", title="Add Invisible Watermark", tags=["image", "watermark"], category="image", version="1.0.0" + "img_watermark", + title="Add Invisible Watermark", + tags=["image", "watermark"], + category="image", + version="1.0.0", ) class ImageWatermarkInvocation(BaseInvocation): """Add an invisible watermark to an image""" @@ -544,7 +626,7 @@ class ImageWatermarkInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -555,7 +637,13 @@ class ImageWatermarkInvocation(BaseInvocation): ) -@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0") +@invocation( + "mask_edge", + title="Mask Edge", + tags=["image", "mask", "inpaint"], + category="image", + version="1.0.0", +) class MaskEdgeInvocation(BaseInvocation): """Applies an edge mask to an image""" @@ -601,7 +689,11 @@ class 
MaskEdgeInvocation(BaseInvocation): @invocation( - "mask_combine", title="Combine Masks", tags=["image", "mask", "multiply"], category="image", version="1.0.0" + "mask_combine", + title="Combine Masks", + tags=["image", "mask", "multiply"], + category="image", + version="1.0.0", ) class MaskCombineInvocation(BaseInvocation): """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" @@ -632,7 +724,13 @@ class MaskCombineInvocation(BaseInvocation): ) -@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0") +@invocation( + "color_correct", + title="Color Correct", + tags=["image", "color"], + category="image", + version="1.0.0", +) class ColorCorrectInvocation(BaseInvocation): """ Shifts the colors of a target image to match the reference image, optionally @@ -742,7 +840,13 @@ class ColorCorrectInvocation(BaseInvocation): ) -@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0") +@invocation( + "img_hue_adjust", + title="Adjust Image Hue", + tags=["image", "hue"], + category="image", + version="1.0.0", +) class ImageHueAdjustmentInvocation(BaseInvocation): """Adjusts the Hue of an image.""" @@ -980,7 +1084,7 @@ class SaveImageInvocation(BaseInvocation): image: ImageField = InputField(description=FieldDescriptions.image) board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -997,7 +1101,7 @@ class SaveImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 3e3a3d9b1f..81fd1f9f5d 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -2,7 +2,7 @@ import os from builtins import float from typing import List, Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -25,11 +25,15 @@ class IPAdapterModelField(BaseModel): model_name: str = Field(description="Name of the IP-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class CLIPVisionModelField(BaseModel): model_name: str = Field(description="Name of the CLIP Vision image encoder model") base_model: BaseModelType = Field(description="Base model (usually 'Any')") + model_config = ConfigDict(protected_namespaces=()) + class IPAdapterField(BaseModel): image: ImageField = Field(description="The IP-Adapter image prompt.") diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 7ca8cbbe6c..7ce0ae7a8a 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -19,7 +19,7 @@ from diffusers.models.attention_processor import ( ) from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler -from pydantic import validator +from pydantic import field_validator from torchvision.transforms.functional import resize as 
tv_resize from invokeai.app.invocations.ip_adapter import IPAdapterField @@ -84,12 +84,20 @@ class SchedulerOutput(BaseInvocationOutput): scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler) -@invocation("scheduler", title="Scheduler", tags=["scheduler"], category="latents", version="1.0.0") +@invocation( + "scheduler", + title="Scheduler", + tags=["scheduler"], + category="latents", + version="1.0.0", +) class SchedulerInvocation(BaseInvocation): """Selects a scheduler.""" scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) def invoke(self, context: InvocationContext) -> SchedulerOutput: @@ -97,7 +105,11 @@ class SchedulerInvocation(BaseInvocation): @invocation( - "create_denoise_mask", title="Create Denoise Mask", tags=["mask", "denoise"], category="latents", version="1.0.0" + "create_denoise_mask", + title="Create Denoise Mask", + tags=["mask", "denoise"], + category="latents", + version="1.0.0", ) class CreateDenoiseMaskInvocation(BaseInvocation): """Creates mask for denoising model run.""" @@ -106,7 +118,11 @@ class CreateDenoiseMaskInvocation(BaseInvocation): image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) - fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4) + fp32: bool = InputField( + default=DEFAULT_PRECISION == "float32", + description=FieldDescriptions.fp32, + ui_order=4, + ) def prep_mask_tensor(self, mask_image): if mask_image.mode != "L": @@ -134,7 +150,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation): if image is not None: vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -167,7 +183,7 @@ def get_scheduler( ) -> Scheduler: scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"]) orig_scheduler_info = context.services.model_manager.get_model( - **scheduler_info.dict(), + **scheduler_info.model_dump(), context=context, ) with orig_scheduler_info as orig_scheduler: @@ -209,34 +225,64 @@ class DenoiseLatentsInvocation(BaseInvocation): negative_conditioning: ConditioningField = InputField( description=FieldDescriptions.negative_cond, input=Input.Connection, ui_order=1 ) - noise: Optional[LatentsField] = InputField(description=FieldDescriptions.noise, input=Input.Connection, ui_order=3) + noise: Optional[LatentsField] = InputField( + default=None, + description=FieldDescriptions.noise, + input=Input.Connection, + ui_order=3, + ) steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps) cfg_scale: Union[float, List[float]] = InputField( default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale" ) - denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start) + denoising_start: float = InputField( + default=0.0, + ge=0, + le=1, + description=FieldDescriptions.denoising_start, + ) denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end) scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", 
description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) - unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet", ui_order=2) - control: Union[ControlField, list[ControlField]] = InputField( + unet: UNetField = InputField( + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", + ui_order=2, + ) + control: Optional[Union[ControlField, list[ControlField]]] = InputField( default=None, input=Input.Connection, ui_order=5, ) ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]] = InputField( - description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection, ui_order=6 + description=FieldDescriptions.ip_adapter, + title="IP-Adapter", + default=None, + input=Input.Connection, + ui_order=6, ) - t2i_adapter: Union[T2IAdapterField, list[T2IAdapterField]] = InputField( - description=FieldDescriptions.t2i_adapter, title="T2I-Adapter", default=None, input=Input.Connection, ui_order=7 + t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]] = InputField( + description=FieldDescriptions.t2i_adapter, + title="T2I-Adapter", + default=None, + input=Input.Connection, + ui_order=7, + ) + latents: Optional[LatentsField] = InputField( + default=None, description=FieldDescriptions.latents, input=Input.Connection ) - latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) denoise_mask: Optional[DenoiseMaskField] = InputField( - default=None, description=FieldDescriptions.mask, input=Input.Connection, ui_order=8 + default=None, + description=FieldDescriptions.mask, + input=Input.Connection, + ui_order=8, ) - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all cfg_scale values are >= 1""" if isinstance(v, list): @@ -259,7 +305,7 @@ class DenoiseLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, base_model=base_model, ) @@ -451,9 +497,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments. with image_encoder_model_info as image_encoder_model: # Get image embeddings from CLIP and ImageProjModel. - image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds( - input_image, image_encoder_model - ) + ( + image_prompt_embeds, + uncond_image_prompt_embeds, + ) = ip_adapter_model.get_image_embeds(input_image, image_encoder_model) conditioning_data.ip_adapter_conditioning.append( IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds) ) @@ -628,7 +675,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets, # below. Investigate whether this is appropriate. 
t2i_adapter_data = self.run_t2i_adapters( - context, self.t2i_adapter, latents.shape, do_classifier_free_guidance=True + context, + self.t2i_adapter, + latents.shape, + do_classifier_free_guidance=True, ) # Get the source node id (we are invoking the prepared node) @@ -641,7 +691,7 @@ class DenoiseLatentsInvocation(BaseInvocation): def _lora_loader(): for lora in self.unet.loras: lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), + **lora.model_dump(exclude={"weight"}), context=context, ) yield (lora_info.context.model, lora.weight) @@ -649,7 +699,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), + **self.unet.unet.model_dump(), context=context, ) with ( @@ -700,7 +750,10 @@ class DenoiseLatentsInvocation(BaseInvocation): denoising_end=self.denoising_end, ) - result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( + ( + result_latents, + result_attention_map_saver, + ) = pipeline.latents_from_embeddings( latents=latents, timesteps=timesteps, init_timestep=init_timestep, @@ -728,7 +781,11 @@ class DenoiseLatentsInvocation(BaseInvocation): @invocation( - "l2i", title="Latents to Image", tags=["latents", "image", "vae", "l2i"], category="latents", version="1.0.0" + "l2i", + title="Latents to Image", + tags=["latents", "image", "vae", "l2i"], + category="latents", + version="1.0.0", ) class LatentsToImageInvocation(BaseInvocation): """Generates an image from latents.""" @@ -743,7 +800,7 @@ class LatentsToImageInvocation(BaseInvocation): ) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -754,7 +811,7 @@ class LatentsToImageInvocation(BaseInvocation): latents = context.services.latents.get(self.latents.latents_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -816,7 +873,7 @@ class LatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -830,7 +887,13 @@ class LatentsToImageInvocation(BaseInvocation): LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"] -@invocation("lresize", title="Resize Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lresize", + title="Resize Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ResizeLatentsInvocation(BaseInvocation): """Resizes latents to explicit width/height (in pixels). 
Provided dimensions are floor-divided by 8.""" @@ -876,7 +939,13 @@ class ResizeLatentsInvocation(BaseInvocation): return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) -@invocation("lscale", title="Scale Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lscale", + title="Scale Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ScaleLatentsInvocation(BaseInvocation): """Scales latents by a given factor.""" @@ -915,7 +984,11 @@ class ScaleLatentsInvocation(BaseInvocation): @invocation( - "i2l", title="Image to Latents", tags=["latents", "image", "vae", "i2l"], category="latents", version="1.0.0" + "i2l", + title="Image to Latents", + tags=["latents", "image", "vae", "i2l"], + category="latents", + version="1.0.0", ) class ImageToLatentsInvocation(BaseInvocation): """Encodes an image into latents.""" @@ -979,7 +1052,7 @@ class ImageToLatentsInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -1007,7 +1080,13 @@ class ImageToLatentsInvocation(BaseInvocation): return vae.encode(image_tensor).latents -@invocation("lblend", title="Blend Latents", tags=["latents", "blend"], category="latents", version="1.0.0") +@invocation( + "lblend", + title="Blend Latents", + tags=["latents", "blend"], + category="latents", + version="1.0.0", +) class BlendLatentsInvocation(BaseInvocation): """Blend two latents using a given alpha. Latents must have same size.""" diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index b52cbb28bf..2aefa1def4 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -3,7 +3,7 @@ from typing import Literal import numpy as np -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput @@ -72,7 +72,14 @@ class RandomIntInvocation(BaseInvocation): return IntegerOutput(value=np.random.randint(self.low, self.high)) -@invocation("rand_float", title="Random Float", tags=["math", "float", "random"], category="math", version="1.0.0") +@invocation( + "rand_float", + title="Random Float", + tags=["math", "float", "random"], + category="math", + version="1.0.1", + use_cache=False, +) class RandomFloatInvocation(BaseInvocation): """Outputs a single random float""" @@ -178,7 +185,7 @@ class IntegerMathInvocation(BaseInvocation): a: int = InputField(default=0, description=FieldDescriptions.num_1) b: int = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") @@ -252,7 +259,7 @@ class FloatMathInvocation(BaseInvocation): a: float = InputField(default=0, description=FieldDescriptions.num_1) b: float = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 449f332387..9578fc3ae9 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -223,4 +223,4 @@ class 
MetadataAccumulatorInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput: """Collects and outputs a CoreMetadata object""" - return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict())) + return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.model_dump())) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 571cb2e730..dfa1075d6e 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -1,7 +1,7 @@ import copy from typing import List, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from ...backend.model_management import BaseModelType, ModelType, SubModelType from .baseinvocation import ( @@ -24,6 +24,8 @@ class ModelInfo(BaseModel): model_type: ModelType = Field(description="Info to load submodel") submodel: Optional[SubModelType] = Field(default=None, description="Info to load submodel") + model_config = ConfigDict(protected_namespaces=()) + class LoraInfo(ModelInfo): weight: float = Field(description="Lora's weight which to use when apply to model") @@ -65,6 +67,8 @@ class MainModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + class LoRAModelField(BaseModel): """LoRA model field""" @@ -72,8 +76,16 @@ class LoRAModelField(BaseModel): model_name: str = Field(description="Name of the LoRA model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) -@invocation("main_model_loader", title="Main Model", tags=["model"], category="model", version="1.0.0") + +@invocation( + "main_model_loader", + title="Main Model", + tags=["model"], + category="model", + version="1.0.0", +) class MainModelLoaderInvocation(BaseInvocation): """Loads a main model, outputting its submodels.""" @@ -180,10 +192,16 @@ class LoraLoaderInvocation(BaseInvocation): lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP", ) def invoke(self, context: InvocationContext) -> LoraLoaderOutput: @@ -244,20 +262,35 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput): clip2: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2") -@invocation("sdxl_lora_loader", title="SDXL LoRA", tags=["lora", "model"], category="model", version="1.0.0") +@invocation( + "sdxl_lora_loader", + title="SDXL LoRA", + tags=["lora", "model"], + category="model", + version="1.0.0", +) class SDXLLoraLoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, 
description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 1", ) clip2: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 2", ) def invoke(self, context: InvocationContext) -> SDXLLoraLoaderOutput: @@ -330,6 +363,8 @@ class VAEModelField(BaseModel): model_name: str = Field(description="Name of the model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + @invocation_output("vae_loader_output") class VaeLoaderOutput(BaseInvocationOutput): @@ -343,7 +378,10 @@ class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" vae_model: VAEModelField = InputField( - description=FieldDescriptions.vae_model, input=Input.Direct, ui_type=UIType.VaeModel, title="VAE" + description=FieldDescriptions.vae_model, + input=Input.Direct, + ui_type=UIType.VaeModel, + title="VAE", ) def invoke(self, context: InvocationContext) -> VaeLoaderOutput: @@ -372,19 +410,31 @@ class VaeLoaderInvocation(BaseInvocation): class SeamlessModeOutput(BaseInvocationOutput): """Modified Seamless Model output""" - unet: Optional[UNetField] = OutputField(description=FieldDescriptions.unet, title="UNet") - vae: Optional[VaeField] = OutputField(description=FieldDescriptions.vae, title="VAE") + unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") + vae: Optional[VaeField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE") -@invocation("seamless", title="Seamless", tags=["seamless", "model"], category="model", version="1.0.0") +@invocation( + "seamless", + title="Seamless", + tags=["seamless", "model"], + category="model", + version="1.0.0", +) class SeamlessModeInvocation(BaseInvocation): """Applies the seamless transformation to the Model UNet and VAE.""" unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) vae: Optional[VaeField] = InputField( - default=None, description=FieldDescriptions.vae_model, input=Input.Connection, title="VAE" + default=None, + description=FieldDescriptions.vae_model, + input=Input.Connection, + title="VAE", ) seamless_y: bool = InputField(default=True, input=Input.Any, description="Specify whether Y axis is seamless") seamless_x: bool = InputField(default=True, input=Input.Any, description="Specify whether X axis is seamless") diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index c46747aa89..3c1651a2f0 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -2,7 +2,7 @@ import torch -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.latent import LatentsField from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -65,7 +65,7 @@ Nodes class NoiseOutput(BaseInvocationOutput): """Invocation noise output""" - noise: LatentsField = 
OutputField(default=None, description=FieldDescriptions.noise) + noise: LatentsField = OutputField(description=FieldDescriptions.noise) width: int = OutputField(description=FieldDescriptions.width) height: int = OutputField(description=FieldDescriptions.height) @@ -78,7 +78,13 @@ def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int): ) -@invocation("noise", title="Noise", tags=["latents", "noise"], category="latents", version="1.0.0") +@invocation( + "noise", + title="Noise", + tags=["latents", "noise"], + category="latents", + version="1.0.0", +) class NoiseInvocation(BaseInvocation): """Generates latent noise.""" @@ -105,7 +111,7 @@ class NoiseInvocation(BaseInvocation): description="Use CPU for noise generation (for reproducible results across platforms)", ) - @validator("seed", pre=True) + @field_validator("seed", mode="before") def modulo_seed(cls, v): """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range.""" return v % (SEED_MAX + 1) diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index 35f8ed965e..3f4f688cf4 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@ -9,7 +9,7 @@ from typing import List, Literal, Optional, Union import numpy as np import torch from diffusers.image_processor import VaeImageProcessor -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from tqdm import tqdm from invokeai.app.invocations.metadata import CoreMetadata @@ -63,14 +63,17 @@ class ONNXPromptInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), ) with tokenizer_info as orig_tokenizer, text_encoder_info as text_encoder: # , ExitStack() as stack: loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.clip.loras ] @@ -175,14 +178,14 @@ class ONNXTextToLatentsInvocation(BaseInvocation): description=FieldDescriptions.unet, input=Input.Connection, ) - control: Optional[Union[ControlField, list[ControlField]]] = InputField( + control: Union[ControlField, list[ControlField]] = InputField( default=None, description=FieldDescriptions.control, ) # seamless: bool = InputField(default=False, description="Whether or not to generate an image that can tile without seams", ) # seamless_axes: str = InputField(default="", description="The axes to tile the image on, 'x' and/or 'y'") - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all cfg_scale values are >= 1""" if isinstance(v, list): @@ -241,7 +244,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, ) @@ -254,12 +257,15 @@ class ONNXTextToLatentsInvocation(BaseInvocation): eta=0.0, ) - unet_info = context.services.model_manager.get_model(**self.unet.unet.dict()) + unet_info = context.services.model_manager.get_model(**self.unet.unet.model_dump()) with unet_info as unet: # , 
ExitStack() as stack: # loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras] loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.unet.loras ] @@ -346,7 +352,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): raise Exception(f"Expected vae_decoder, found: {self.vae.vae.model_type}") vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), ) # clear memory as vae decode can request a lot @@ -375,7 +381,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -403,6 +409,8 @@ class OnnxModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + @invocation("onnx_model_loader", title="ONNX Main Model", tags=["onnx", "model"], category="model", version="1.0.0") class OnnxModelLoaderInvocation(BaseInvocation): diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index 7c327a6657..0e86fb978b 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -44,13 +44,22 @@ from invokeai.app.invocations.primitives import FloatCollectionOutput from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation -@invocation("float_range", title="Float Range", tags=["math", "range"], category="math", version="1.0.0") +@invocation( + "float_range", + title="Float Range", + tags=["math", "range"], + category="math", + version="1.0.0", +) class FloatLinearRangeInvocation(BaseInvocation): """Creates a range""" start: float = InputField(default=5, description="The first value of the range") stop: float = InputField(default=10, description="The last value of the range") - steps: int = InputField(default=30, description="number of values to interpolate over (including start and stop)") + steps: int = InputField( + default=30, + description="number of values to interpolate over (including start and stop)", + ) def invoke(self, context: InvocationContext) -> FloatCollectionOutput: param_list = list(np.linspace(self.start, self.stop, self.steps)) @@ -95,7 +104,13 @@ EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))] # actually I think for now could just use CollectionOutput (which is list[Any] -@invocation("step_param_easing", title="Step Param Easing", tags=["step", "easing"], category="step", version="1.0.0") +@invocation( + "step_param_easing", + title="Step Param Easing", + tags=["step", "easing"], + category="step", + version="1.0.0", +) class StepParamEasingInvocation(BaseInvocation): """Experimental per-step parameter easing for denoising steps""" @@ -159,7 +174,9 @@ class StepParamEasingInvocation(BaseInvocation): context.services.logger.debug("base easing duration: " + str(base_easing_duration)) even_num_steps = num_easing_steps % 2 == 0 # even number of steps easing_function = easing_class( - start=self.start_value, end=self.end_value, 
duration=base_easing_duration - 1 + start=self.start_value, + end=self.end_value, + duration=base_easing_duration - 1, ) base_easing_vals = list() for step_index in range(base_easing_duration): @@ -199,7 +216,11 @@ class StepParamEasingInvocation(BaseInvocation): # else: # no mirroring (default) - easing_function = easing_class(start=self.start_value, end=self.end_value, duration=num_easing_steps - 1) + easing_function = easing_class( + start=self.start_value, + end=self.end_value, + duration=num_easing_steps - 1, + ) for step_index in range(num_easing_steps): step_val = easing_function.ease(step_index) easing_list.append(step_val) diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index b3d482b779..cb43a52447 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -3,7 +3,7 @@ from typing import Optional, Union import numpy as np from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import StringCollectionOutput @@ -21,7 +21,10 @@ from .baseinvocation import BaseInvocation, InputField, InvocationContext, UICom class DynamicPromptInvocation(BaseInvocation): """Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator""" - prompt: str = InputField(description="The prompt to parse with dynamicprompts", ui_component=UIComponent.Textarea) + prompt: str = InputField( + description="The prompt to parse with dynamicprompts", + ui_component=UIComponent.Textarea, + ) max_prompts: int = InputField(default=1, description="The number of prompts to generate") combinatorial: bool = InputField(default=False, description="Whether to use the combinatorial generator") @@ -36,21 +39,31 @@ class DynamicPromptInvocation(BaseInvocation): return StringCollectionOutput(collection=prompts) -@invocation("prompt_from_file", title="Prompts from File", tags=["prompt", "file"], category="prompt", version="1.0.0") +@invocation( + "prompt_from_file", + title="Prompts from File", + tags=["prompt", "file"], + category="prompt", + version="1.0.0", +) class PromptsFromFileInvocation(BaseInvocation): """Loads prompts from a text file""" file_path: str = InputField(description="Path to prompt text file") pre_prompt: Optional[str] = InputField( - default=None, description="String to prepend to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to prepend to each prompt", + ui_component=UIComponent.Textarea, ) post_prompt: Optional[str] = InputField( - default=None, description="String to append to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to append to each prompt", + ui_component=UIComponent.Textarea, ) start_line: int = InputField(default=1, ge=1, description="Line in the file to start start from") max_prompts: int = InputField(default=1, ge=0, description="Max lines to read from file (0=all)") - @validator("file_path") + @field_validator("file_path") def file_path_exists(cls, v): if not exists(v): raise ValueError(FileNotFoundError) @@ -79,6 +92,10 @@ class PromptsFromFileInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringCollectionOutput: prompts = self.promptsFromFile( - self.file_path, self.pre_prompt, self.post_prompt, self.start_line, self.max_prompts + self.file_path, + self.pre_prompt, + self.post_prompt, + self.start_line, + self.max_prompts, ) return 
StringCollectionOutput(collection=prompts) diff --git a/invokeai/app/invocations/t2i_adapter.py b/invokeai/app/invocations/t2i_adapter.py index e1bd8d0d04..76c250a552 100644 --- a/invokeai/app/invocations/t2i_adapter.py +++ b/invokeai/app/invocations/t2i_adapter.py @@ -1,6 +1,6 @@ from typing import Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -23,6 +23,8 @@ class T2IAdapterModelField(BaseModel): model_name: str = Field(description="Name of the T2I-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class T2IAdapterField(BaseModel): image: ImageField = Field(description="The T2I-Adapter image prompt.") diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index e26c1b9084..d30bb71d95 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -7,6 +7,7 @@ import numpy as np import torch from basicsr.archs.rrdbnet_arch import RRDBNet from PIL import Image +from pydantic import ConfigDict from realesrgan import RealESRGANer from invokeai.app.invocations.primitives import ImageField, ImageOutput @@ -38,6 +39,8 @@ class ESRGANInvocation(BaseInvocation): default=400, ge=0, description="Tile size for tiled ESRGAN upscaling (0=tiling disabled)" ) + model_config = ConfigDict(protected_namespaces=()) + def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) models_path = context.services.configuration.models_path diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py index e0264dde0d..d08951b499 100644 --- a/invokeai/app/services/board_records/board_records_common.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Optional, Union -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, Field from invokeai.app.util.misc import get_iso_timestamp from invokeai.app.util.model_exclude_null import BaseModelExcludeNull @@ -18,9 +18,9 @@ class BoardRecord(BaseModelExcludeNull): """The created timestamp of the image.""" updated_at: Union[datetime, str] = Field(description="The updated timestamp of the board.") """The updated timestamp of the image.""" - deleted_at: Union[datetime, str, None] = Field(description="The deleted timestamp of the board.") + deleted_at: Optional[Union[datetime, str]] = Field(default=None, description="The deleted timestamp of the board.") """The updated timestamp of the image.""" - cover_image_name: Optional[str] = Field(description="The name of the cover image of the board.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the cover image of the board.") """The name of the cover image of the board.""" @@ -46,9 +46,9 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: ) -class BoardChanges(BaseModel, extra=Extra.forbid): - board_name: Optional[str] = Field(description="The board's new name.") - cover_image_name: Optional[str] = Field(description="The name of the board's new cover image.") +class BoardChanges(BaseModel, extra="forbid"): + board_name: Optional[str] = Field(default=None, description="The board's new name.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the board's new 
cover image.") class BoardRecordNotFoundException(Exception): diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py index e22e1915fe..0cb54102bb 100644 --- a/invokeai/app/services/boards/boards_common.py +++ b/invokeai/app/services/boards/boards_common.py @@ -17,7 +17,7 @@ class BoardDTO(BoardRecord): def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO: """Converts a board record to a board DTO.""" return BoardDTO( - **board_record.dict(exclude={"cover_image_name"}), + **board_record.model_dump(exclude={"cover_image_name"}), cover_image_name=cover_image_name, image_count=image_count, ) diff --git a/invokeai/app/services/config/config_base.py b/invokeai/app/services/config/config_base.py index a07e14252a..9405c1dfae 100644 --- a/invokeai/app/services/config/config_base.py +++ b/invokeai/app/services/config/config_base.py @@ -18,7 +18,7 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints from omegaconf import DictConfig, ListConfig, OmegaConf -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str @@ -32,12 +32,14 @@ class InvokeAISettings(BaseSettings): initconf: ClassVar[Optional[DictConfig]] = None argparse_groups: ClassVar[Dict] = {} + model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True) + def parse_args(self, argv: Optional[list] = sys.argv[1:]): parser = self.get_parser() opt, unknown_opts = parser.parse_known_args(argv) if len(unknown_opts) > 0: print("Unknown args:", unknown_opts) - for name in self.__fields__: + for name in self.model_fields: if name not in self._excluded(): value = getattr(opt, name) if isinstance(value, ListConfig): @@ -54,10 +56,12 @@ class InvokeAISettings(BaseSettings): cls = self.__class__ type = get_args(get_type_hints(cls)["type"])[0] field_dict = dict({type: dict()}) - for name, field in self.__fields__.items(): + for name, field in self.model_fields.items(): if name in cls._excluded_from_yaml(): continue - category = field.field_info.extra.get("category") or "Uncategorized" + category = ( + field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized" + ) value = getattr(self, name) if category not in field_dict[type]: field_dict[type][category] = dict() @@ -73,7 +77,7 @@ class InvokeAISettings(BaseSettings): else: settings_stanza = "Uncategorized" - env_prefix = getattr(cls.Config, "env_prefix", None) + env_prefix = getattr(cls.model_config, "env_prefix", None) env_prefix = env_prefix if env_prefix is not None else settings_stanza.upper() initconf = ( @@ -89,14 +93,18 @@ class InvokeAISettings(BaseSettings): for key, value in os.environ.items(): upcase_environ[key.upper()] = value - fields = cls.__fields__ + fields = cls.model_fields cls.argparse_groups = {} for name, field in fields.items(): if name not in cls._excluded(): current_default = field.default - category = field.field_info.extra.get("category", "Uncategorized") + category = ( + field.json_schema_extra.get("category", "Uncategorized") + if field.json_schema_extra + else "Uncategorized" + ) env_name = env_prefix + "_" + name if category in initconf and name in initconf.get(category): field.default = initconf.get(category).get(name) @@ -146,11 +154,6 @@ class 
InvokeAISettings(BaseSettings): "tiled_decode", ] - class Config: - env_file_encoding = "utf-8" - arbitrary_types_allowed = True - case_sensitive = True - @classmethod def add_field_argument(cls, command_parser, name: str, field, default_override=None): field_type = get_type_hints(cls).get(name) @@ -161,7 +164,7 @@ class InvokeAISettings(BaseSettings): if field.default_factory is None else field.default_factory() ) - if category := field.field_info.extra.get("category"): + if category := (field.json_schema_extra.get("category", None) if field.json_schema_extra else None): if category not in cls.argparse_groups: cls.argparse_groups[category] = command_parser.add_argument_group(category) argparse_group = cls.argparse_groups[category] @@ -169,7 +172,7 @@ class InvokeAISettings(BaseSettings): argparse_group = command_parser if get_origin(field_type) == Literal: - allowed_values = get_args(field.type_) + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -182,7 +185,7 @@ class InvokeAISettings(BaseSettings): type=field_type, default=default, choices=allowed_values, - help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == Union: @@ -191,7 +194,7 @@ class InvokeAISettings(BaseSettings): dest=name, type=int_or_float_or_str, default=default, - help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == list: @@ -199,17 +202,17 @@ class InvokeAISettings(BaseSettings): f"--{name}", dest=name, nargs="*", - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) else: argparse_group.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index 2a42c99bd8..df01b65882 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -144,8 +144,8 @@ which is set to the desired top-level name. 
For example, to create a class InvokeBatch(InvokeAISettings): type: Literal["InvokeBatch"] = "InvokeBatch" - node_count : int = Field(default=1, description="Number of nodes to run on", category='Resources') - cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", category='Resources') + node_count : int = Field(default=1, description="Number of nodes to run on", json_schema_extra=dict(category='Resources')) + cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", json_schema_extra=dict(category='Resources')) This will now read and write from the "InvokeBatch" section of the config file, look for environment variables named INVOKEBATCH_*, and @@ -175,7 +175,8 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hints from omegaconf import DictConfig, OmegaConf -from pydantic import Field, parse_obj_as +from pydantic import Field, TypeAdapter +from pydantic_settings import SettingsConfigDict from .config_base import InvokeAISettings @@ -185,6 +186,21 @@ LEGACY_INIT_FILE = Path("invokeai.init") DEFAULT_MAX_VRAM = 0.5 +class Categories(object): + WebServer = dict(category="Web Server") + Features = dict(category="Features") + Paths = dict(category="Paths") + Logging = dict(category="Logging") + Development = dict(category="Development") + Other = dict(category="Other") + ModelCache = dict(category="Model Cache") + Device = dict(category="Device") + Generation = dict(category="Generation") + Queue = dict(category="Queue") + Nodes = dict(category="Nodes") + MemoryPerformance = dict(category="Memory/Performance") + + class InvokeAIAppConfig(InvokeAISettings): """ Generate images using Stable Diffusion. Use "invokeai" to launch @@ -201,86 +217,88 @@ class InvokeAIAppConfig(InvokeAISettings): type: Literal["InvokeAI"] = "InvokeAI" # WEB - host : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server') - port : int = Field(default=9090, description="Port to bind to", category='Web Server') - allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", category='Web Server') - allow_credentials : bool = Field(default=True, description="Allow CORS credentials", category='Web Server') - allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", category='Web Server') - allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", category='Web Server') + host : str = Field(default="127.0.0.1", description="IP address to bind to", json_schema_extra=Categories.WebServer) + port : int = Field(default=9090, description="Port to bind to", json_schema_extra=Categories.WebServer) + allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", json_schema_extra=Categories.WebServer) + allow_credentials : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer) + allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer) + allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer) # FEATURES - esrgan : bool = Field(default=True, description="Enable/disable upscaling code", category='Features') - internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features') - log_tokenization : bool = 
Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
-    patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features')
-    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', category='Features')
+    esrgan : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features)
+    internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", json_schema_extra=Categories.Features)
+    log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", json_schema_extra=Categories.Features)
+    patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", json_schema_extra=Categories.Features)
+    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', json_schema_extra=Categories.Features)

     # PATHS
-    root : Path = Field(default=None, description='InvokeAI runtime root directory', category='Paths')
-    autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
-    lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
-    embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
-    controlnet_dir : Path = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths')
-    conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
-    models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths')
-    legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
-    db_dir : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths')
-    outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
-    use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
-    from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
+    root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths)
+    autoimport_dir : Optional[Path] = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths)
+    lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
+    embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
+    controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
+    conf_path : Optional[Path] = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
+    models_dir : Optional[Path] = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths)
+    legacy_conf_dir : Optional[Path] = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths)
+    db_dir : Optional[Path] = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths)
+    outdir : Optional[Path] = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths)
+    use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths)
+    from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths)

     # LOGGING
-    log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', category="Logging")
+    log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', json_schema_extra=Categories.Logging)
     # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
-    log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
-    log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
-    log_sql : bool = Field(default=False, description="Log SQL queries", category="Logging")
+    log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', json_schema_extra=Categories.Logging)
+    log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
+    log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)

-    dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", category="Development")
+    dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)

-    version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
+    version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)

     # CACHE
-    ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", category="Model Cache", )
-    vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", category="Model Cache", )
-    lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )
+    ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
+    vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
+    lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )

     # DEVICE
-    device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", category="Device", )
-    precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", category="Device", )
+    device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
+    precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)

     # GENERATION
-    sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category="Generation", )
-    attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", category="Generation", )
-    attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", )
-    force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
-    force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
-    png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", category="Generation", )
+    sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
+    attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
+    attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
+    force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
+    png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)

     # QUEUE
-    max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", category="Queue", )
+    max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)

     # NODES
-    allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", category="Nodes")
-    deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", category="Nodes")
-    node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", category="Nodes", )
+    allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", json_schema_extra=Categories.Nodes)
+    deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
+    node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)

     # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
-    always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
-    free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
-    max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
-    max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
-    xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
-    tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance')
+    always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
+    free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
+    max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
+    max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.MemoryPerformance)
+    xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.MemoryPerformance)
+    tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.MemoryPerformance)

     # See InvokeAIAppConfig subclass below for CACHE and DEVICE categories
     # fmt: on

-    class Config:
-        validate_assignment = True
-        env_prefix = "INVOKEAI"
+    model_config = SettingsConfigDict(validate_assignment=True, env_prefix="INVOKEAI")

-    def parse_args(self, argv: Optional[list[str]] = None, conf: Optional[DictConfig] = None, clobber=False):
+    def parse_args(
+        self,
+        argv: Optional[list[str]] = None,
+        conf: Optional[DictConfig] = None,
+        clobber=False,
+    ):
         """
         Update settings with contents of init file, environment, and command-line settings.
@@ -308,7 +326,11 @@ class InvokeAIAppConfig(InvokeAISettings):
         if self.singleton_init and not clobber:
             hints = get_type_hints(self.__class__)
             for k in self.singleton_init:
-                setattr(self, k, parse_obj_as(hints[k], self.singleton_init[k]))
+                setattr(
+                    self,
+                    k,
+                    TypeAdapter(hints[k]).validate_python(self.singleton_init[k]),
+                )

     @classmethod
     def get_config(cls, **kwargs) -> InvokeAIAppConfig:
diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py
index 8685db3717..ad00815151 100644
--- a/invokeai/app/services/events/events_base.py
+++ b/invokeai/app/services/events/events_base.py
@@ -2,7 +2,6 @@
 from typing import Any, Optional

-from invokeai.app.invocations.model import ModelInfo
 from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
 from invokeai.app.services.session_queue.session_queue_common import (
     BatchStatus,
@@ -11,6 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     SessionQueueStatus,
 )
 from invokeai.app.util.misc import get_timestamp
+from invokeai.backend.model_management.model_manager import ModelInfo
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType

@@ -55,7 +55,7 @@ class EventServiceBase:
             graph_execution_state_id=graph_execution_state_id,
             node_id=node.get("id"),
             source_node_id=source_node_id,
-            progress_image=progress_image.dict() if progress_image is not None else None,
+            progress_image=progress_image.model_dump() if progress_image is not None else None,
             step=step,
             order=order,
             total_steps=total_steps,
@@ -291,8 +291,8 @@ class EventServiceBase:
                 started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None,
                 completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
             ),
-            batch_status=batch_status.dict(),
-            queue_status=queue_status.dict(),
+            batch_status=batch_status.model_dump(),
+            queue_status=queue_status.model_dump(),
         ),
     )
diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py
index d998f9024b..5dde7b05d6 100644
--- a/invokeai/app/services/image_files/image_files_base.py
+++ b/invokeai/app/services/image_files/image_files_base.py
@@ -1,4 +1,5 @@
 from abc import ABC, abstractmethod
+from pathlib import Path
 from typing import Optional

 from PIL.Image import Image as PILImageType
@@ -13,7 +14,7 @@ class ImageFileStorageBase(ABC):
         pass

     @abstractmethod
-    def get_path(self, image_name: str, thumbnail: bool = False) -> str:
+    def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
         """Gets the internal path to an image or thumbnail."""
         pass
diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py
index 58db6feb23..107ff85f9b 100644
--- a/invokeai/app/services/image_records/image_records_base.py
+++ b/invokeai/app/services/image_records/image_records_base.py
@@ -34,8 +34,8 @@ class ImageRecordStorageBase(ABC):
     @abstractmethod
     def get_many(
         self,
-        offset: Optional[int] = None,
-        limit: Optional[int] = None,
+        offset: int = 0,
+        limit: int = 10,
         image_origin: Optional[ResourceOrigin] = None,
         categories: Optional[list[ImageCategory]] = None,
         is_intermediate: Optional[bool] = None,
@@ -69,11 +69,11 @@ class ImageRecordStorageBase(ABC):
         image_category: ImageCategory,
         width: int,
         height: int,
-        session_id: Optional[str],
-        node_id: Optional[str],
-        metadata: Optional[dict],
-        is_intermediate: bool = False,
-        starred: bool = False,
+        is_intermediate: Optional[bool] = False,
+        starred: Optional[bool] = False,
+        session_id: Optional[str] = None,
+        node_id: Optional[str] = None,
+        metadata: Optional[dict] = None,
     ) -> datetime:
         """Saves an image record."""
         pass
diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py
index 39fac92048..5a6e5652c9 100644
--- a/invokeai/app/services/image_records/image_records_common.py
+++ b/invokeai/app/services/image_records/image_records_common.py
@@ -3,7 +3,7 @@ import datetime
 from enum import Enum
 from typing import Optional, Union

-from pydantic import Extra, Field, StrictBool, StrictStr
+from pydantic import Field, StrictBool, StrictStr

 from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import get_iso_timestamp
@@ -129,7 +129,9 @@ class ImageRecord(BaseModelExcludeNull):
     """The created timestamp of the image."""
     updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the image.")
     """The updated timestamp of the image."""
-    deleted_at: Union[datetime.datetime, str, None] = Field(description="The deleted timestamp of the image.")
+    deleted_at: Optional[Union[datetime.datetime, str]] = Field(
+        default=None, description="The deleted timestamp of the image."
+    )
     """The deleted timestamp of the image."""
     is_intermediate: bool = Field(description="Whether this is an intermediate image.")
     """Whether this is an intermediate image."""
@@ -147,7 +149,7 @@ class ImageRecord(BaseModelExcludeNull):
     """Whether this image is starred."""

-class ImageRecordChanges(BaseModelExcludeNull, extra=Extra.forbid):
+class ImageRecordChanges(BaseModelExcludeNull, extra="allow"):
     """A set of changes to apply to an image record.

     Only limited changes are valid:
diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py
index 864f4eff00..9793236d9c 100644
--- a/invokeai/app/services/image_records/image_records_sqlite.py
+++ b/invokeai/app/services/image_records/image_records_sqlite.py
@@ -2,7 +2,7 @@ import json
 import sqlite3
 import threading
 from datetime import datetime
-from typing import Optional, cast
+from typing import Optional, Union, cast

 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite import SqliteDatabase
@@ -117,7 +117,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
             """
         )

-    def get(self, image_name: str) -> Optional[ImageRecord]:
+    def get(self, image_name: str) -> ImageRecord:
         try:
             self._lock.acquire()
@@ -223,8 +223,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
     def get_many(
         self,
-        offset: Optional[int] = None,
-        limit: Optional[int] = None,
+        offset: int = 0,
+        limit: int = 10,
         image_origin: Optional[ResourceOrigin] = None,
         categories: Optional[list[ImageCategory]] = None,
         is_intermediate: Optional[bool] = None,
@@ -249,7 +249,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
             """

             query_conditions = ""
-            query_params = []
+            query_params: list[Union[int, str, bool]] = []

             if image_origin is not None:
                 query_conditions += """--sql
@@ -387,13 +387,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
         image_name: str,
         image_origin: ResourceOrigin,
         image_category: ImageCategory,
-        session_id: Optional[str],
         width: int,
         height: int,
-        node_id: Optional[str],
-        metadata: Optional[dict],
-        is_intermediate: bool = False,
-        starred: bool = False,
+        is_intermediate: Optional[bool] = False,
+        starred: Optional[bool] = False,
+        session_id: Optional[str] = None,
+        node_id: Optional[str] = None,
+        metadata: Optional[dict] = None,
     ) -> datetime:
         try:
             metadata_json = None if metadata is None else json.dumps(metadata)
diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py
index 71581099a3..a611e9485d 100644
--- a/invokeai/app/services/images/images_base.py
+++ b/invokeai/app/services/images/images_base.py
@@ -49,7 +49,7 @@ class ImageServiceABC(ABC):
         node_id: Optional[str] = None,
         session_id: Optional[str] = None,
         board_id: Optional[str] = None,
-        is_intermediate: bool = False,
+        is_intermediate: Optional[bool] = False,
         metadata: Optional[dict] = None,
         workflow: Optional[str] = None,
     ) -> ImageDTO:
diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py
index f8b63a16c1..325cecdd26 100644
--- a/invokeai/app/services/images/images_common.py
+++ b/invokeai/app/services/images/images_common.py
@@ -20,7 +20,9 @@ class ImageUrlsDTO(BaseModelExcludeNull):
 class ImageDTO(ImageRecord, ImageUrlsDTO):
     """Deserialized image record, enriched for the frontend."""

-    board_id: Optional[str] = Field(description="The id of the board the image belongs to, if one exists.")
+    board_id: Optional[str] = Field(
+        default=None, description="The id of the board the image belongs to, if one exists."
+    )
     """The id of the board the image belongs to, if one exists."""

     pass
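A pattern worth calling out in the last few hunks: pydantic v1 gave Optional[...] and Union[..., None] fields an implicit None default, while v2 treats any field without an explicit default as required, hence the default=None additions to deleted_at and board_id. A toy illustration, with illustrative model names rather than the codebase's:

from typing import Optional

from pydantic import BaseModel, Field, ValidationError


class WithDefault(BaseModel):
    # Explicit default, as in the deleted_at / board_id fixes above.
    deleted_at: Optional[str] = Field(default=None, description="Deletion timestamp, if any")


class WithoutDefault(BaseModel):
    deleted_at: Optional[str]  # no default: required under v2


assert WithDefault().deleted_at is None
try:
    WithoutDefault()
except ValidationError as e:
    assert e.errors()[0]["type"] == "missing"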
@@ -34,7 +36,7 @@ def image_record_to_dto(
 ) -> ImageDTO:
     """Converts an image record to an image DTO."""
     return ImageDTO(
-        **image_record.dict(),
+        **image_record.model_dump(),
         image_url=image_url,
         thumbnail_url=thumbnail_url,
         board_id=board_id,
diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py
index 9134b9a4f6..d4e473b8e4 100644
--- a/invokeai/app/services/images/images_default.py
+++ b/invokeai/app/services/images/images_default.py
@@ -41,7 +41,7 @@ class ImageService(ImageServiceABC):
         node_id: Optional[str] = None,
         session_id: Optional[str] = None,
         board_id: Optional[str] = None,
-        is_intermediate: bool = False,
+        is_intermediate: Optional[bool] = False,
         metadata: Optional[dict] = None,
         workflow: Optional[str] = None,
     ) -> ImageDTO:
@@ -146,7 +146,7 @@ class ImageService(ImageServiceABC):
             self.__invoker.services.logger.error("Problem getting image DTO")
             raise e

-    def get_metadata(self, image_name: str) -> Optional[ImageMetadata]:
+    def get_metadata(self, image_name: str) -> ImageMetadata:
         try:
             image_record = self.__invoker.services.image_records.get(image_name)
             metadata = self.__invoker.services.image_records.get_metadata(image_name)
@@ -174,7 +174,7 @@ class ImageService(ImageServiceABC):

     def get_path(self, image_name: str, thumbnail: bool = False) -> str:
         try:
-            return self.__invoker.services.image_files.get_path(image_name, thumbnail)
+            return str(self.__invoker.services.image_files.get_path(image_name, thumbnail))
         except Exception as e:
             self.__invoker.services.logger.error("Problem getting image path")
             raise e
diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py
index 817dbb958e..4a503b3c6b 100644
--- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py
+++ b/invokeai/app/services/invocation_cache/invocation_cache_memory.py
@@ -58,7 +58,12 @@ class MemoryInvocationCache(InvocationCacheBase):
             # If the cache is full, we need to remove the least used
             number_to_delete = len(self._cache) + 1 - self._max_cache_size
             self._delete_oldest_access(number_to_delete)
-            self._cache[key] = CachedItem(invocation_output, invocation_output.json())
+            self._cache[key] = CachedItem(
+                invocation_output,
+                invocation_output.model_dump_json(
+                    warnings=False, exclude_defaults=True, exclude_unset=True, include={"type"}
+                ),
+            )

     def _delete_oldest_access(self, number_to_delete: int) -> None:
         number_to_delete = min(number_to_delete, len(self._cache))
@@ -85,7 +90,7 @@ class MemoryInvocationCache(InvocationCacheBase):

     @staticmethod
     def create_key(invocation: BaseInvocation) -> int:
-        return hash(invocation.json(exclude={"id"}))
+        return hash(invocation.model_dump_json(exclude={"id"}, warnings=False))

     def disable(self) -> None:
         with self._lock:
diff --git a/invokeai/app/services/invocation_processor/invocation_processor_default.py b/invokeai/app/services/invocation_processor/invocation_processor_default.py
index 349c4a03e4..c59fb678ef 100644
--- a/invokeai/app/services/invocation_processor/invocation_processor_default.py
+++ b/invokeai/app/services/invocation_processor/invocation_processor_default.py
@@ -89,7 +89,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     queue_item_id=queue_item.session_queue_item_id,
                     queue_id=queue_item.session_queue_id,
                     graph_execution_state_id=graph_execution_state.id,
-                    node=invocation.dict(),
+                    node=invocation.model_dump(),
                     source_node_id=source_node_id,
                 )
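The renames in these service files are the mechanical part of the migration and follow pydantic's published v1-to-v2 mapping; the warnings=False kwarg seen above is v2-only and suppresses serialization warnings. A compact crib:

from pydantic import BaseModel, TypeAdapter


class Node(BaseModel):
    id: str
    type: str = "example"


node = Node(id="n1")

# v1 -> v2 equivalents used throughout this PR:
#   .dict()               -> .model_dump()
#   .json()               -> .model_dump_json()
#   .copy(deep=True)      -> .model_copy(deep=True)
#   parse_obj_as(T, data) -> TypeAdapter(T).validate_python(data)
#   parse_raw_as(T, text) -> TypeAdapter(T).validate_json(text)
assert node.model_dump() == {"id": "n1", "type": "example"}
assert node.model_copy(deep=True) == node
assert TypeAdapter(Node).validate_python({"id": "n2"}).id == "n2"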
@@ -127,9 +127,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     queue_item_id=queue_item.session_queue_item_id,
                     queue_id=queue_item.session_queue_id,
                     graph_execution_state_id=graph_execution_state.id,
-                    node=invocation.dict(),
+                    node=invocation.model_dump(),
                     source_node_id=source_node_id,
-                    result=outputs.dict(),
+                    result=outputs.model_dump(),
                 )
                 self.__invoker.services.performance_statistics.log_stats()
@@ -157,7 +157,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     queue_item_id=queue_item.session_queue_item_id,
                     queue_id=queue_item.session_queue_id,
                     graph_execution_state_id=graph_execution_state.id,
-                    node=invocation.dict(),
+                    node=invocation.model_dump(),
                     source_node_id=source_node_id,
                     error_type=e.__class__.__name__,
                     error=error,
@@ -187,7 +187,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     queue_item_id=queue_item.session_queue_item_id,
                     queue_id=queue_item.session_queue_id,
                     graph_execution_state_id=graph_execution_state.id,
-                    node=invocation.dict(),
+                    node=invocation.model_dump(),
                     source_node_id=source_node_id,
                     error_type=e.__class__.__name__,
                     error=traceback.format_exc(),
diff --git a/invokeai/app/services/invocation_stats/invocation_stats_default.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py
index 2041ab6190..be019b6820 100644
--- a/invokeai/app/services/invocation_stats/invocation_stats_default.py
+++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py
@@ -72,7 +72,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
             )
             self.collector.update_invocation_stats(
                 graph_id=self.graph_id,
-                invocation_type=self.invocation.type,  # type: ignore - `type` is not on the `BaseInvocation` model, but *is* on all invocations
+                invocation_type=self.invocation.type,  # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations
                 time_used=time.time() - self.start_time,
                 vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0,
             )
diff --git a/invokeai/app/services/item_storage/item_storage_sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py
index 1d6008e90f..1bb9429130 100644
--- a/invokeai/app/services/item_storage/item_storage_sqlite.py
+++ b/invokeai/app/services/item_storage/item_storage_sqlite.py
@@ -2,7 +2,7 @@ import sqlite3
 import threading
 from typing import Generic, Optional, TypeVar, get_args

-from pydantic import BaseModel, parse_raw_as
+from pydantic import BaseModel, TypeAdapter

 from invokeai.app.services.shared.pagination import PaginatedResults
 from invokeai.app.services.shared.sqlite import SqliteDatabase
@@ -18,6 +18,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
     _cursor: sqlite3.Cursor
     _id_field: str
     _lock: threading.RLock
+    _adapter: Optional[TypeAdapter[T]]

     def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"):
         super().__init__()
@@ -27,6 +28,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
         self._table_name = table_name
         self._id_field = id_field  # TODO: validate that T has this field
         self._cursor = self._conn.cursor()
+        self._adapter: Optional[TypeAdapter[T]] = None

         self._create_table()
@@ -45,16 +47,21 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
             self._lock.release()

     def _parse_item(self, item: str) -> T:
-        # __orig_class__ is technically an implementation detail of the typing module, not a supported API
-        item_type = get_args(self.__orig_class__)[0]  # type: ignore
-        return parse_raw_as(item_type, item)
+        if self._adapter is None:
+            """
+            We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so
+            we can create it when it is first needed instead.
+            __orig_class__ is technically an implementation detail of the typing module, not a supported API
+            """
+            self._adapter = TypeAdapter(get_args(self.__orig_class__)[0])  # type: ignore [attr-defined]
+        return self._adapter.validate_json(item)
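The lazy adapter above exists for two reasons: TypeAdapter construction compiles a validation schema (worth caching), and a generic container only learns its concrete T through __orig_class__, which is not yet set during __init__. A standalone sketch of the same trick, minus the SQLite plumbing:

from typing import Generic, Optional, TypeVar, get_args

from pydantic import BaseModel, TypeAdapter

T = TypeVar("T", bound=BaseModel)


class JsonStore(Generic[T]):
    def __init__(self) -> None:
        self._adapter: Optional[TypeAdapter[T]] = None  # deferred; see parse()

    def parse(self, raw: str) -> T:
        if self._adapter is None:
            # __orig_class__ holds the concrete parameterization, e.g. JsonStore[Item];
            # it only becomes available after __init__ has returned.
            self._adapter = TypeAdapter(get_args(self.__orig_class__)[0])  # type: ignore[attr-defined]
        return self._adapter.validate_json(raw)


class Item(BaseModel):
    id: str


store = JsonStore[Item]()
assert store.parse('{"id": "a"}').id == "a"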
    def set(self, item: T):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
-                (item.json(),),
+                (item.model_dump_json(warnings=False, exclude_none=True),),
            )
            self._conn.commit()
        finally:
diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py
index bb9110ba0a..4c2fc4c085 100644
--- a/invokeai/app/services/model_manager/model_manager_base.py
+++ b/invokeai/app/services/model_manager/model_manager_base.py
@@ -231,7 +231,7 @@ class ModelManagerServiceBase(ABC):
     def merge_models(
         self,
         model_names: List[str] = Field(
-            default=None, min_items=2, max_items=3, description="List of model names to merge"
+            default=None, min_length=2, max_length=3, description="List of model names to merge"
         ),
         base_model: Union[BaseModelType, str] = Field(
             default=None, description="Base model shared by all models to be merged"
diff --git a/invokeai/app/services/model_manager/model_manager_default.py b/invokeai/app/services/model_manager/model_manager_default.py
index 263f804b4d..cdb3e59a91 100644
--- a/invokeai/app/services/model_manager/model_manager_default.py
+++ b/invokeai/app/services/model_manager/model_manager_default.py
@@ -327,7 +327,7 @@ class ModelManagerService(ModelManagerServiceBase):
     def merge_models(
         self,
         model_names: List[str] = Field(
-            default=None, min_items=2, max_items=3, description="List of model names to merge"
+            default=None, min_length=2, max_length=3, description="List of model names to merge"
         ),
         base_model: Union[BaseModelType, str] = Field(
             default=None, description="Base model shared by all models to be merged"
diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py
index 2d40a5b0c4..48e1da83b5 100644
--- a/invokeai/app/services/session_queue/session_queue_common.py
+++ b/invokeai/app/services/session_queue/session_queue_common.py
@@ -3,8 +3,8 @@ import json
 from itertools import chain, product
 from typing import Generator, Iterable, Literal, NamedTuple, Optional, TypeAlias, Union, cast

-from pydantic import BaseModel, Field, StrictStr, parse_raw_as, root_validator, validator
-from pydantic.json import pydantic_encoder
+from pydantic import BaseModel, ConfigDict, Field, StrictStr, TypeAdapter, field_validator, model_validator
+from pydantic_core import to_jsonable_python

 from invokeai.app.invocations.baseinvocation import BaseInvocation
 from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError
@@ -17,7 +17,7 @@ class BatchZippedLengthError(ValueError):
     """Raise when a batch has items of different lengths."""

-class BatchItemsTypeError(TypeError):
+class BatchItemsTypeError(ValueError):  # this cannot be a TypeError in pydantic v2
     """Raise when a batch has items of different types."""

@@ -70,7 +70,7 @@ class Batch(BaseModel):
         default=1, ge=1, description="Int stating how many times to iterate through all possible batch indices"
     )

-    @validator("data")
+    @field_validator("data")
     def validate_lengths(cls, v: Optional[BatchDataCollection]):
         if v is None:
             return v
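From here the Batch model switches to the v2 validator decorators: @validator becomes @field_validator, and (further down) @root_validator becomes @model_validator, where mode="after" receives a constructed model instance instead of a values dict. A reduced sketch of both on a made-up model:

from typing import Optional

from pydantic import BaseModel, field_validator, model_validator


class Batch(BaseModel):
    runs: int = 1
    data: Optional[list[list[int]]] = None

    @field_validator("data")
    @classmethod
    def validate_lengths(cls, v: Optional[list[list[int]]]):
        if v is None:
            return v
        if len({len(row) for row in v}) > 1:
            raise ValueError("Zipped batch items must all have the same length")
        return v

    @model_validator(mode="after")
    def validate_consistency(self):
        # "after" validators get the model instance; v1's root_validator got a dict
        if self.data is not None and self.runs < 1:
            raise ValueError("runs must be >= 1 when data is provided")
        return self


Batch(runs=2, data=[[1, 2], [3, 4]])  # validates cleanly

This is also why BatchItemsTypeError now derives from ValueError: v2 only converts ValueError and AssertionError raised inside validators into validation errors, so a TypeError would propagate as a crash.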
@@ -81,7 +81,7 @@ class Batch(BaseModel):
                 raise BatchZippedLengthError("Zipped batch items must all have the same length")
         return v

-    @validator("data")
+    @field_validator("data")
     def validate_types(cls, v: Optional[BatchDataCollection]):
         if v is None:
             return v
@@ -94,7 +94,7 @@ class Batch(BaseModel):
                     raise BatchItemsTypeError("All items in a batch must have the same type")
         return v

-    @validator("data")
+    @field_validator("data")
     def validate_unique_field_mappings(cls, v: Optional[BatchDataCollection]):
         if v is None:
             return v
@@ -107,34 +107,35 @@ class Batch(BaseModel):
                 paths.add(pair)
         return v

-    @root_validator(skip_on_failure=True)
+    @model_validator(mode="after")
     def validate_batch_nodes_and_edges(cls, values):
-        batch_data_collection = cast(Optional[BatchDataCollection], values["data"])
+        batch_data_collection = cast(Optional[BatchDataCollection], values.data)
         if batch_data_collection is None:
             return values
-        graph = cast(Graph, values["graph"])
+        graph = cast(Graph, values.graph)
         for batch_data_list in batch_data_collection:
             for batch_data in batch_data_list:
                 try:
                     node = cast(BaseInvocation, graph.get_node(batch_data.node_path))
                 except NodeNotFoundError:
                     raise NodeNotFoundError(f"Node {batch_data.node_path} not found in graph")
-                if batch_data.field_name not in node.__fields__:
+                if batch_data.field_name not in node.model_fields:
                     raise NodeNotFoundError(f"Field {batch_data.field_name} not found in node {batch_data.node_path}")
         return values

-    @validator("graph")
+    @field_validator("graph")
     def validate_graph(cls, v: Graph):
         v.validate_self()
         return v

-    class Config:
-        schema_extra = {
-            "required": [
+    model_config = ConfigDict(
+        json_schema_extra=dict(
+            required=[
                 "graph",
                 "runs",
             ]
-        }
+        )
+    )

 # endregion Batch
@@ -146,15 +147,21 @@ DEFAULT_QUEUE_ID = "default"

 QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"]

+adapter_NodeFieldValue = TypeAdapter(list[NodeFieldValue])
+

 def get_field_values(queue_item_dict: dict) -> Optional[list[NodeFieldValue]]:
     field_values_raw = queue_item_dict.get("field_values", None)
-    return parse_raw_as(list[NodeFieldValue], field_values_raw) if field_values_raw is not None else None
+    return adapter_NodeFieldValue.validate_json(field_values_raw) if field_values_raw is not None else None
+
+
+adapter_GraphExecutionState = TypeAdapter(GraphExecutionState)


 def get_session(queue_item_dict: dict) -> GraphExecutionState:
     session_raw = queue_item_dict.get("session", "{}")
-    return parse_raw_as(GraphExecutionState, session_raw)
+    session = adapter_GraphExecutionState.validate_json(session_raw, strict=False)
+    return session


 class SessionQueueItemWithoutGraph(BaseModel):
@@ -178,14 +185,14 @@ class SessionQueueItemWithoutGraph(BaseModel):
     )

     @classmethod
-    def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
+    def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
         # must parse these manually
         queue_item_dict["field_values"] = get_field_values(queue_item_dict)
         return SessionQueueItemDTO(**queue_item_dict)

-    class Config:
-        schema_extra = {
-            "required": [
+    model_config = ConfigDict(
+        json_schema_extra=dict(
+            required=[
                 "item_id",
                 "status",
                 "batch_id",
@@ -196,7 +203,8 @@ class SessionQueueItemWithoutGraph(BaseModel):
                 "created_at",
                 "updated_at",
             ]
-        }
+        )
+    )


 class SessionQueueItemDTO(SessionQueueItemWithoutGraph):
@@ -207,15 +215,15 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
     session: GraphExecutionState = Field(description="The fully-populated session to be executed")

     @classmethod
-    def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem":
+    def queue_item_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem":
         # must parse these manually
         queue_item_dict["field_values"] = get_field_values(queue_item_dict)
         queue_item_dict["session"] = get_session(queue_item_dict)
         return SessionQueueItem(**queue_item_dict)

-    class Config:
-        schema_extra = {
-            "required": [
+    model_config = ConfigDict(
+        json_schema_extra=dict(
+            required=[
                 "item_id",
                 "status",
                 "batch_id",
@@ -227,7 +235,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
                 "created_at",
                 "updated_at",
             ]
-        }
+        )
+    )


 # endregion Queue Items
@@ -321,7 +330,7 @@ def populate_graph(graph: Graph, node_field_values: Iterable[NodeFieldValue]) ->
     """
     Populates the given graph with the given batch data items.
     """
-    graph_clone = graph.copy(deep=True)
+    graph_clone = graph.model_copy(deep=True)
     for item in node_field_values:
         node = graph_clone.get_node(item.node_path)
         if node is None:
@@ -354,7 +363,7 @@ def create_session_nfv_tuples(
                 for item in batch_datum.items
             ]
             node_field_values_to_zip.append(node_field_values)
-        data.append(list(zip(*node_field_values_to_zip)))
+        data.append(list(zip(*node_field_values_to_zip)))  # type: ignore [arg-type]

     # create generator to yield session,nfv tuples
     count = 0
@@ -409,11 +418,11 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
         values_to_insert.append(
             SessionQueueValueToInsert(
                 queue_id,  # queue_id
-                session.json(),  # session (json)
+                session.model_dump_json(warnings=False, exclude_none=True),  # session (json)
                 session.id,  # session_id
                 batch.batch_id,  # batch_id
                 # must use pydantic_encoder bc field_values is a list of models
-                json.dumps(field_values, default=pydantic_encoder) if field_values else None,  # field_values (json)
+                json.dumps(field_values, default=to_jsonable_python) if field_values else None,  # field_values (json)
                 priority,  # priority
             )
         )
@@ -421,3 +430,6 @@

 # endregion Util
+
+Batch.model_rebuild(force=True)
+SessionQueueItem.model_rebuild(force=True)
diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py
index eb82667be5..4daab9cdbc 100644
--- a/invokeai/app/services/session_queue/session_queue_sqlite.py
+++ b/invokeai/app/services/session_queue/session_queue_sqlite.py
@@ -277,8 +277,8 @@ class SqliteSessionQueue(SessionQueueBase):
         if result is None:
             raise SessionQueueItemNotFoundError(f"No queue item with batch id {enqueue_result.batch.batch_id}")
         return EnqueueGraphResult(
-            **enqueue_result.dict(),
-            queue_item=SessionQueueItemDTO.from_dict(dict(result)),
+            **enqueue_result.model_dump(),
+            queue_item=SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)),
         )

     def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
@@ -351,7 +351,7 @@ class SqliteSessionQueue(SessionQueueBase):
             self.__lock.release()
         if result is None:
             return None
-        queue_item = SessionQueueItem.from_dict(dict(result))
+        queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
         queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="in_progress")
         return queue_item

@@ -380,7 +380,7 @@ class SqliteSessionQueue(SessionQueueBase):
             self.__lock.release()
         if result is None:
             return None
-        return SessionQueueItem.from_dict(dict(result))
+        return SessionQueueItem.queue_item_from_dict(dict(result))

     def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
         try:
@@ -404,7 +404,7 @@ class SqliteSessionQueue(SessionQueueBase):
             self.__lock.release()
         if result is None:
             return None
-        return SessionQueueItem.from_dict(dict(result))
+        return SessionQueueItem.queue_item_from_dict(dict(result))

     def _set_queue_item_status(
         self, item_id: int, status: QUEUE_ITEM_STATUS, error: Optional[str] = None
@@ -564,7 +564,7 @@ class SqliteSessionQueue(SessionQueueBase):
         queue_item = self.get_queue_item(item_id)
         if queue_item.status not in ["canceled", "failed", "completed"]:
             status = "failed" if error is not None else "canceled"
-            queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error)
+            queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error)  # type: ignore [arg-type] # mypy seems to not narrow the Literals here
             self.__invoker.services.queue.cancel(queue_item.session_id)
             self.__invoker.services.events.emit_session_canceled(
                 queue_item_id=queue_item.item_id,
@@ -699,7 +699,7 @@ class SqliteSessionQueue(SessionQueueBase):
             self.__lock.release()
         if result is None:
             raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
-        return SessionQueueItem.from_dict(dict(result))
+        return SessionQueueItem.queue_item_from_dict(dict(result))

     def list_queue_items(
         self,
@@ -751,7 +751,7 @@ class SqliteSessionQueue(SessionQueueBase):
             params.append(limit + 1)
         self.__cursor.execute(query, params)
         results = cast(list[sqlite3.Row], self.__cursor.fetchall())
-        items = [SessionQueueItemDTO.from_dict(dict(result)) for result in results]
+        items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
         has_more = False
         if len(items) > limit:
             # remove the extra item
diff --git a/invokeai/app/services/shared/default_graphs.py b/invokeai/app/services/shared/default_graphs.py
index b2d0a1f0b6..9a6e2456cb 100644
--- a/invokeai/app/services/shared/default_graphs.py
+++ b/invokeai/app/services/shared/default_graphs.py
@@ -80,10 +80,10 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li
     # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
     graphs: list[LibraryGraph] = list()

-    # text_to_image = graph_library.get(default_text_to_image_graph_id)
+    text_to_image = graph_library.get(default_text_to_image_graph_id)

-    # # TODO: Check if the graph is the same as the default one, and if not, update it
-    # #if text_to_image is None:
+    # TODO: Check if the graph is the same as the default one, and if not, update it
+    # if text_to_image is None:
     text_to_image = create_text_to_image()
     graph_library.set(text_to_image)
diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py
index dab045af9d..8f974f7c6b 100644
--- a/invokeai/app/services/shared/graph.py
+++ b/invokeai/app/services/shared/graph.py
@@ -5,7 +5,7 @@ import itertools
 from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints

 import networkx as nx
-from pydantic import BaseModel, root_validator, validator
+from pydantic import BaseModel, ConfigDict, field_validator, model_validator
 from pydantic.fields import Field

 # Importing * is bad karma but needed here for node detection
@@ -235,7 +235,8 @@ class CollectInvocationOutput(BaseInvocationOutput):
 class CollectInvocation(BaseInvocation):
     """Collects values into a collection"""

-    item: Any = InputField(
+    item: Optional[Any] = InputField(
+        default=None,
         description="The item to collect (all inputs must be of the same type)",
         ui_type=UIType.CollectionItem,
         title="Collection Item",
     )
@@ -250,8 +251,8 @@ class CollectInvocation(BaseInvocation):
         return CollectInvocationOutput(collection=copy.copy(self.collection))

-InvocationsUnion = Union[BaseInvocation.get_invocations()]  # type: ignore
-InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()]  # type: ignore
+InvocationsUnion: Any = BaseInvocation.get_invocations_union()
+InvocationOutputsUnion: Any = BaseInvocationOutput.get_outputs_union()


 class Graph(BaseModel):
@@ -378,13 +379,13 @@ class Graph(BaseModel):
             raise NodeNotFoundError(f"Edge destination node {edge.destination.node_id} does not exist in the graph")

         # output fields are not on the node object directly, they are on the output type
-        if edge.source.field not in source_node.get_output_type().__fields__:
+        if edge.source.field not in source_node.get_output_type().model_fields:
             raise NodeFieldNotFoundError(
                 f"Edge source field {edge.source.field} does not exist in node {edge.source.node_id}"
             )

         # input fields are on the node
-        if edge.destination.field not in destination_node.__fields__:
+        if edge.destination.field not in destination_node.model_fields:
             raise NodeFieldNotFoundError(
                 f"Edge destination field {edge.destination.field} does not exist in node {edge.destination.node_id}"
             )
@@ -395,24 +396,24 @@ class Graph(BaseModel):
             raise CyclicalGraphError("Graph contains cycles")

         # Validate all edge connections are valid
-        for e in self.edges:
+        for edge in self.edges:
             if not are_connections_compatible(
-                self.get_node(e.source.node_id),
-                e.source.field,
-                self.get_node(e.destination.node_id),
-                e.destination.field,
+                self.get_node(edge.source.node_id),
+                edge.source.field,
+                self.get_node(edge.destination.node_id),
+                edge.destination.field,
             ):
                 raise InvalidEdgeError(
-                    f"Invalid edge from {e.source.node_id}.{e.source.field} to {e.destination.node_id}.{e.destination.field}"
+                    f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                 )

         # Validate all iterators & collectors
         # TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available
-        for n in self.nodes.values():
-            if isinstance(n, IterateInvocation) and not self._is_iterator_connection_valid(n.id):
-                raise InvalidEdgeError(f"Invalid iterator node {n.id}")
-            if isinstance(n, CollectInvocation) and not self._is_collector_connection_valid(n.id):
-                raise InvalidEdgeError(f"Invalid collector node {n.id}")
+        for node in self.nodes.values():
+            if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id):
+                raise InvalidEdgeError(f"Invalid iterator node {node.id}")
+            if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id):
+                raise InvalidEdgeError(f"Invalid collector node {node.id}")

         return None
@@ -594,7 +595,7 @@ class Graph(BaseModel):
     def _get_input_edges_and_graphs(
         self, node_path: str, prefix: Optional[str] = None
-    ) -> list[tuple["Graph", str, Edge]]:
+    ) -> list[tuple["Graph", Union[str, None], Edge]]:
         """Gets all input edges for a node along with the graph they are in and the graph's path"""
         edges = list()
@@ -636,7 +637,7 @@ class Graph(BaseModel):
     def _get_output_edges_and_graphs(
         self, node_path: str, prefix: Optional[str] = None
-    ) -> list[tuple["Graph", str, Edge]]:
+    ) -> list[tuple["Graph", Union[str, None], Edge]]:
         """Gets all output edges for a node along with the graph they are in and the graph's path"""
         edges = list()
@@ -817,15 +818,15 @@ class GraphExecutionState(BaseModel):
         default_factory=dict,
     )

-    @validator("graph")
+    @field_validator("graph")
     def graph_is_valid(cls, v: Graph):
         """Validates that the graph is valid"""
         v.validate_self()
         return v

-    class Config:
-        schema_extra = {
-            "required": [
+    model_config = ConfigDict(
+        json_schema_extra=dict(
+            required=[
                 "id",
                 "graph",
                 "execution_graph",
@@ -836,7 +837,8 @@ class GraphExecutionState(BaseModel):
                 "prepared_source_mapping",
                 "source_prepared_mapping",
             ]
-        }
+        )
+    )

     def next(self) -> Optional[BaseInvocation]:
         """Gets the next node ready to execute."""
@@ -910,7 +912,7 @@ class GraphExecutionState(BaseModel):
             input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field)
             self_iteration_count = len(input_collection)

-        new_nodes = list()
+        new_nodes: list[str] = list()
         if self_iteration_count == 0:
             # TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid.
             return new_nodes
@@ -920,7 +922,7 @@ class GraphExecutionState(BaseModel):

         # Create new edges for this iteration
         # For collect nodes, this may contain multiple inputs to the same field
-        new_edges = list()
+        new_edges: list[Edge] = list()
         for edge in input_edges:
             for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id):
                 new_edge = Edge(
@@ -1179,18 +1181,18 @@ class LibraryGraph(BaseModel):
         description="The outputs exposed by this graph", default_factory=list
     )

-    @validator("exposed_inputs", "exposed_outputs")
-    def validate_exposed_aliases(cls, v):
+    @field_validator("exposed_inputs", "exposed_outputs")
+    def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]):
         if len(v) != len(set(i.alias for i in v)):
             raise ValueError("Duplicate exposed alias")
         return v

-    @root_validator
+    @model_validator(mode="after")
     def validate_exposed_nodes(cls, values):
-        graph = values["graph"]
+        graph = values.graph

         # Validate exposed inputs
-        for exposed_input in values["exposed_inputs"]:
+        for exposed_input in values.exposed_inputs:
             if not graph.has_node(exposed_input.node_path):
                 raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist")
             node = graph.get_node(exposed_input.node_path)
@@ -1200,7 +1202,7 @@ class LibraryGraph(BaseModel):
             )

         # Validate exposed outputs
-        for exposed_output in values["exposed_outputs"]:
+        for exposed_output in values.exposed_outputs:
             if not graph.has_node(exposed_output.node_path):
                 raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist")
             node = graph.get_node(exposed_output.node_path)
@@ -1212,4 +1214,6 @@ class LibraryGraph(BaseModel):

         return values

-GraphInvocation.update_forward_refs()
+GraphInvocation.model_rebuild(force=True)
+Graph.model_rebuild(force=True)
+GraphExecutionState.model_rebuild(force=True)
diff --git a/invokeai/app/services/shared/pagination.py b/invokeai/app/services/shared/pagination.py
index 85c8fb984e..ea342b1101 100644
--- a/invokeai/app/services/shared/pagination.py
+++ b/invokeai/app/services/shared/pagination.py
@@ -1,12 +1,11 @@
 from typing import Generic, TypeVar

 from pydantic import BaseModel, Field
-from pydantic.generics import GenericModel

 GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel)


-class CursorPaginatedResults(GenericModel, Generic[GenericBaseModel]):
+class CursorPaginatedResults(BaseModel, Generic[GenericBaseModel]):
     """
     Cursor-paginated results
     Generic must be a Pydantic model
     """
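pydantic v2 removed pydantic.generics.GenericModel; generic models now inherit from BaseModel and Generic[...] directly, which is the whole of the pagination change. A runnable sketch:

from typing import Generic, TypeVar

from pydantic import BaseModel, Field

GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel)


class OffsetPaginatedResults(BaseModel, Generic[GenericBaseModel]):
    offset: int = Field(description="Offset from which to retrieve items")
    limit: int = Field(description="Limit of items to get")
    total: int = Field(description="Total number of items in result")
    items: list[GenericBaseModel] = Field(description="Items")


class ImageStub(BaseModel):
    image_name: str


page = OffsetPaginatedResults[ImageStub](
    offset=0, limit=10, total=1, items=[ImageStub(image_name="img.png")]
)
assert page.items[0].image_name == "img.png"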
@@ -17,7 +16,7 @@ class CursorPaginatedResults(GenericModel, Generic[GenericBaseModel]):
     items: list[GenericBaseModel] = Field(..., description="Items")


-class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]):
+class OffsetPaginatedResults(BaseModel, Generic[GenericBaseModel]):
     """
     Offset-paginated results
     Generic must be a Pydantic model
@@ -29,7 +28,7 @@ class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]):
     items: list[GenericBaseModel] = Field(description="Items")


-class PaginatedResults(GenericModel, Generic[GenericBaseModel]):
+class PaginatedResults(BaseModel, Generic[GenericBaseModel]):
     """
     Paginated results
     Generic must be a Pydantic model
diff --git a/invokeai/app/util/controlnet_utils.py b/invokeai/app/util/controlnet_utils.py
index e6f34a4c44..51ceec2edd 100644
--- a/invokeai/app/util/controlnet_utils.py
+++ b/invokeai/app/util/controlnet_utils.py
@@ -265,7 +265,7 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device:

 def prepare_control_image(
-    image: Image,
+    image: Image.Image,
     width: int,
     height: int,
     num_channels: int = 3,
diff --git a/invokeai/app/util/misc.py b/invokeai/app/util/misc.py
index 6d56652ed4..910b05d8dd 100644
--- a/invokeai/app/util/misc.py
+++ b/invokeai/app/util/misc.py
@@ -1,4 +1,5 @@
 import datetime
+import typing
 import uuid

 import numpy as np
@@ -27,3 +28,8 @@ def get_random_seed():
 def uuid_string():
     res = uuid.uuid4()
     return str(res)
+
+
+def is_optional(value: typing.Any):
+    """Checks if a value is typed as Optional. Note that Optional is sugar for Union[x, None]."""
+    return typing.get_origin(value) is typing.Union and type(None) in typing.get_args(value)
diff --git a/invokeai/app/util/model_exclude_null.py b/invokeai/app/util/model_exclude_null.py
index b75f127ec7..6da41039b4 100644
--- a/invokeai/app/util/model_exclude_null.py
+++ b/invokeai/app/util/model_exclude_null.py
@@ -13,11 +13,11 @@ From https://github.com/tiangolo/fastapi/discussions/8882#discussioncomment-5154

 class BaseModelExcludeNull(BaseModel):
-    def dict(self, *args, **kwargs) -> dict[str, Any]:
+    def model_dump(self, *args, **kwargs) -> dict[str, Any]:
         """
         Override the default dict method to exclude None values in the response
         """
         kwargs.pop("exclude_none", None)
-        return super().dict(*args, exclude_none=True, **kwargs)
+        return super().model_dump(*args, exclude_none=True, **kwargs)

     pass
diff --git a/invokeai/assets/__init__.py b/invokeai/assets/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py
index 12db54b0db..de0c6a1652 100644
--- a/invokeai/backend/image_util/txt2mask.py
+++ b/invokeai/backend/image_util/txt2mask.py
@@ -41,18 +41,18 @@ config = InvokeAIAppConfig.get_config()

 class SegmentedGrayscale(object):
-    def __init__(self, image: Image, heatmap: torch.Tensor):
+    def __init__(self, image: Image.Image, heatmap: torch.Tensor):
         self.heatmap = heatmap
         self.image = image

-    def to_grayscale(self, invert: bool = False) -> Image:
+    def to_grayscale(self, invert: bool = False) -> Image.Image:
         return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255)))

-    def to_mask(self, threshold: float = 0.5) -> Image:
+    def to_mask(self, threshold: float = 0.5) -> Image.Image:
         discrete_heatmap = self.heatmap.lt(threshold).int()
         return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L"))

-    def to_transparent(self, invert: bool = False) -> Image:
+    def to_transparent(self, invert: bool = False) -> Image.Image:
         transparent_image = self.image.copy()
         # For img2img, we want the selected regions to be transparent,
         # but to_grayscale() returns the opposite. Thus invert.
@@ -61,7 +61,7 @@ class SegmentedGrayscale(object):
         return transparent_image

     # unscales and uncrops the 352x352 heatmap so that it matches the image again
-    def _rescale(self, heatmap: Image) -> Image:
+    def _rescale(self, heatmap: Image.Image) -> Image.Image:
         size = self.image.width if (self.image.width > self.image.height) else self.image.height
         resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS)
         return resized_image.crop((0, 0, self.image.width, self.image.height))
@@ -82,7 +82,7 @@ class Txt2Mask(object):
         self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)

     @torch.no_grad()
-    def segment(self, image, prompt: str) -> SegmentedGrayscale:
+    def segment(self, image: Image.Image, prompt: str) -> SegmentedGrayscale:
         """
         Given a prompt string such as "a bagel", tries to identify the object in the
         provided image and returns a SegmentedGrayscale object in which the brighter
@@ -99,7 +99,7 @@ class Txt2Mask(object):
         heatmap = torch.sigmoid(outputs.logits)
         return SegmentedGrayscale(image, heatmap)

-    def _scale_and_crop(self, image: Image) -> Image:
+    def _scale_and_crop(self, image: Image.Image) -> Image.Image:
         scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE))
         if image.width > image.height:  # width is constraint
             scale = CLIPSEG_SIZE / image.width
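The PIL annotation fixes in this file address a common confusion: PIL.Image is a module, so a bare `image: Image` annotates with the module object; the actual image class is PIL.Image.Image. A sketch of the corrected style:

from PIL import Image


def to_grayscale(image: Image.Image) -> Image.Image:
    # Image.Image is the class; `Image` alone names the module
    return image.convert("L")


img = Image.new("RGB", (64, 64))
assert isinstance(to_grayscale(img), Image.Image)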
diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py
index bc7fa01e3b..7eceb9be82 100644
--- a/invokeai/backend/image_util/util.py
+++ b/invokeai/backend/image_util/util.py
@@ -9,7 +9,7 @@ class InitImageResizer:
     def __init__(self, Image):
         self.image = Image

-    def resize(self, width=None, height=None) -> Image:
+    def resize(self, width=None, height=None) -> Image.Image:
         """
         Return a copy of the image resized to fit within a box width x height.
         The aspect ratio is
diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index d4bcea64d0..59cf1260ba 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -793,7 +793,11 @@ def migrate_init_file(legacy_format: Path):
     old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
     new = InvokeAIAppConfig.get_config()

-    fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"]
+    fields = [
+        x
+        for x, y in InvokeAIAppConfig.model_fields.items()
+        if (y.json_schema_extra.get("category", None) if y.json_schema_extra else None) != "DEPRECATED"
+    ]
     for attr in fields:
         if hasattr(old, attr):
             try:
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index bdc9a6c6bb..38a7361c85 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -236,13 +236,13 @@ import types
 from dataclasses import dataclass
 from pathlib import Path
 from shutil import move, rmtree
-from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union
+from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union, cast

 import torch
 import yaml
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
@@ -294,6 +296,8 @@ class AddModelResult(BaseModel):
     base_model: BaseModelType = Field(description="The base model")
     config: ModelConfigBase = Field(description="The configuration of the model")

+    model_config = ConfigDict(protected_namespaces=())
+

 MAX_CACHE_SIZE = 6.0  # GB

@@ -576,7 +578,7 @@ class ModelManager(object):
         """
         model_key = self.create_key(model_name, base_model, model_type)
         if model_key in self.models:
-            return self.models[model_key].dict(exclude_defaults=True)
+            return self.models[model_key].model_dump(exclude_defaults=True)
         else:
             return None  # TODO: None or empty dict on not found
@@ -632,7 +634,7 @@ class ModelManager(object):
                 continue

             model_dict = dict(
-                **model_config.dict(exclude_defaults=True),
+                **model_config.model_dump(exclude_defaults=True),
                 # OpenAPIModelInfoBase
                 model_name=cur_model_name,
                 base_model=cur_base_model,
@@ -900,14 +902,16 @@ class ModelManager(object):
         Write current configuration out to the indicated file.
         """
         data_to_save = dict()
-        data_to_save["__metadata__"] = self.config_meta.dict()
+        data_to_save["__metadata__"] = self.config_meta.model_dump()
         for model_key, model_config in self.models.items():
             model_name, base_model, model_type = self.parse_key(model_key)
             model_class = self._get_implementation(base_model, model_type)
             if model_class.save_to_config:
                 # TODO: or exclude_unset better fits here?
-                data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"})
+                data_to_save[model_key] = cast(BaseModel, model_config).model_dump(
+                    exclude_defaults=True, exclude={"error"}, mode="json"
+                )
                 # alias for config file
                 data_to_save[model_key]["format"] = data_to_save[model_key].pop("model_format")
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index bf4b208395..0afd731032 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from typing import Literal, get_origin

-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict, create_model

 from .base import (  # noqa: F401
     BaseModelType,
@@ -106,6 +106,8 @@ class OpenAPIModelInfoBase(BaseModel):
     base_model: BaseModelType
     model_type: ModelType

+    model_config = ConfigDict(protected_namespaces=())
+

 for base_model, models in MODEL_CLASSES.items():
     for model_type, model_class in models.items():
@@ -121,17 +123,11 @@ for base_model, models in MODEL_CLASSES.items():
         if openapi_cfg_name in vars():
             continue

-        api_wrapper = type(
+        api_wrapper = create_model(
             openapi_cfg_name,
-            (cfg, OpenAPIModelInfoBase),
-            dict(
-                __annotations__=dict(
-                    model_type=Literal[model_type.value],
-                ),
-            ),
+            __base__=(cfg, OpenAPIModelInfoBase),
+            model_type=(Literal[model_type], model_type),  # type: ignore
         )
-
-        # globals()[openapi_cfg_name] = api_wrapper
         vars()[openapi_cfg_name] = api_wrapper

         OPENAPI_MODEL_CONFIGS.append(api_wrapper)
diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index 6e507735d4..f735e37189 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -19,7 +19,7 @@ from diffusers import logging as diffusers_logging
 from onnx import numpy_helper
 from onnxruntime import InferenceSession, SessionOptions, get_available_providers
 from picklescan.scanner import scan_file_path
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field
 from transformers import logging as transformers_logging

@@ -86,14 +86,21 @@ class ModelError(str, Enum):
     NotFound = "not_found"

+def model_config_json_schema_extra(schema: dict[str, Any]) -> None:
+    if "required" not in schema:
+        schema["required"] = []
+    schema["required"].append("model_type")
+
+
 class ModelConfigBase(BaseModel):
     path: str  # or Path
     description: Optional[str] = Field(None)
     model_format: Optional[str] = Field(None)
     error: Optional[ModelError] = Field(None)

-    class Config:
-        use_enum_values = True
+    model_config = ConfigDict(
+        use_enum_values=True, protected_namespaces=(), json_schema_extra=model_config_json_schema_extra
+    )


 class EmptyConfigLoader(ConfigMixin):
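protected_namespaces=() appears on every model carrying model_-prefixed fields (model_name, model_type, model_format, ...) because pydantic v2 reserves the model_ prefix for its own API and warns about such field names. A sketch:

from pydantic import BaseModel, ConfigDict


class AddModelExample(BaseModel):
    # Without the config below, this field name triggers a protected-namespace warning.
    model_name: str
    base_model: str

    model_config = ConfigDict(protected_namespaces=())


result = AddModelExample(model_name="sd-1.5", base_model="sd-1")
assert result.model_name == "sd-1.5"

Note also the callable form of json_schema_extra in ModelConfigBase above: v2 accepts either a dict or a callable that mutates the generated schema in place.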
ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), device="cpu", dtype=torch_dtype + ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), + device=torch.device("cpu"), + dtype=torch_dtype, ) self.model_size = model.calc_size() diff --git a/invokeai/backend/model_management/seamless.py b/invokeai/backend/model_management/seamless.py index 7138f2e123..bfdf9e0c53 100644 --- a/invokeai/backend/model_management/seamless.py +++ b/invokeai/backend/model_management/seamless.py @@ -96,7 +96,7 @@ def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axe finally: for module, orig_conv_forward in to_restore: module._conv_forward = orig_conv_forward - if hasattr(m, "asymmetric_padding_mode"): - del m.asymmetric_padding_mode - if hasattr(m, "asymmetric_padding"): - del m.asymmetric_padding + if hasattr(module, "asymmetric_padding_mode"): + del module.asymmetric_padding_mode + if hasattr(module, "asymmetric_padding"): + del module.asymmetric_padding diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py index abef979b1c..b5ea40185a 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py @@ -1,7 +1,8 @@ import math +from typing import Optional -import PIL import torch +from PIL import Image from torchvision.transforms.functional import InterpolationMode from torchvision.transforms.functional import resize as tv_resize @@ -11,7 +12,7 @@ class AttentionMapSaver: self.token_ids = token_ids self.latents_shape = latents_shape # self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) - self.collated_maps = {} + self.collated_maps: dict[str, torch.Tensor] = {} def clear_maps(self): self.collated_maps = {} @@ -38,9 +39,10 @@ class AttentionMapSaver: def write_maps_to_disk(self, path: str): pil_image = self.get_stacked_maps_image() - pil_image.save(path, "PNG") + if pil_image is not None: + pil_image.save(path, "PNG") - def get_stacked_maps_image(self) -> PIL.Image: + def get_stacked_maps_image(self) -> Optional[Image.Image]: """ Scale all collected attention maps to the same size, blend them together and return as an image. :return: An image containing a vertical stack of blended attention maps, one for each requested token. @@ -95,4 +97,4 @@ class AttentionMapSaver: return None merged_bytes = merged.mul(0xFF).byte() - return PIL.Image.fromarray(merged_bytes.numpy(), mode="L") + return Image.fromarray(merged_bytes.numpy(), mode="L") diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts index 79a09c628f..bd5422841f 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts @@ -151,7 +151,9 @@ export const addRequestedSingleImageDeletionListener = () => { if (wasImageDeleted) { dispatch( - api.util.invalidateTags([{ type: 'Board', id: imageDTO.board_id }]) + api.util.invalidateTags([ + { type: 'Board', id: imageDTO.board_id ?? 
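One subtlety in the `model_manager.py` write path above: `model_dump(..., mode="json")` coerces non-JSON values such as enum members and `Path`s to plain primitives before the dict is written out, whereas the default `"python"` mode keeps the raw objects. A quick illustration using the `ModelError` enum from this diff:

```python
# mode="json" serializes enum members to their values; the default mode
# returns the enum objects themselves (ModelError is from models/base.py).
from enum import Enum
from pydantic import BaseModel

class ModelError(str, Enum):
    NotFound = "not_found"

class Cfg(BaseModel):
    error: ModelError = ModelError.NotFound

assert Cfg().model_dump()["error"] is ModelError.NotFound
assert Cfg().model_dump(mode="json")["error"] == "not_found"
```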
'none' }, + ]) ); } }, diff --git a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx index 7c85b3557e..5ea17f788c 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx @@ -6,7 +6,7 @@ import { useMantineMultiSelectStyles } from 'mantine-theme/hooks/useMantineMulti import { KeyboardEvent, RefObject, memo, useCallback } from 'react'; type IAIMultiSelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx index 39fe7ead3c..675314b421 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx @@ -12,7 +12,7 @@ export type IAISelectDataType = { }; type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; label?: string; inputRef?: RefObject; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx index 8cc08d2304..9541015b65 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx @@ -10,7 +10,7 @@ export type IAISelectDataType = { }; export type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts index 32e24845ea..4c2cd31eca 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts +++ b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts @@ -39,7 +39,10 @@ export const dynamicPromptsSlice = createSlice({ promptsChanged: (state, action: PayloadAction) => { state.prompts = action.payload; }, - parsingErrorChanged: (state, action: PayloadAction) => { + parsingErrorChanged: ( + state, + action: PayloadAction + ) => { state.parsingError = action.payload; }, isErrorChanged: (state, action: PayloadAction) => { diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index f7ef848211..87c716bb81 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -10,7 +10,7 @@ import { } from 'features/parameters/types/parameterSchemas'; import i18n from 'i18next'; import { has, keyBy } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { OpenAPIV3_1 } from 'openapi-types'; import { RgbaColor } from 'react-colorful'; import { Node } from 'reactflow'; import { Graph, _InputField, _OutputField } from 'services/api/types'; @@ -791,9 +791,9 @@ export type IntegerInputFieldTemplate = InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type IntegerCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -814,9 +814,9 @@ export type FloatInputFieldTemplate = 
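Context for the `exclusiveMaximum?: number` / `exclusiveMinimum?: number` changes here and in `fieldTemplateBuilders.ts` below: pydantic v2 emits JSON Schema 2020-12 (the dialect OpenAPI 3.1 uses), where exclusive bounds are the bound values themselves rather than 3.0's boolean flags alongside `maximum`/`minimum`. A quick check with a throwaway model:

```python
# JSON Schema 2020-12 (what pydantic v2 / OpenAPI 3.1 emit) encodes exclusive
# bounds as numbers, not booleans -- hence the frontend type changes.
from pydantic import BaseModel, Field

class Params(BaseModel):
    strength: float = Field(0.5, gt=0, lt=1)

schema = Params.model_json_schema()["properties"]["strength"]
assert schema["exclusiveMinimum"] == 0
assert schema["exclusiveMaximum"] == 1
```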
InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type FloatCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -1163,20 +1163,20 @@ export type TypeHints = { }; export type InvocationSchemaExtra = { - output: OpenAPIV3.ReferenceObject; // the output of the invocation + output: OpenAPIV3_1.ReferenceObject; // the output of the invocation title: string; category?: string; tags?: string[]; version?: string; properties: Omit< - NonNullable & + NonNullable & (_InputField | _OutputField), 'type' > & { - type: Omit & { + type: Omit & { default: AnyInvocationType; }; - use_cache: Omit & { + use_cache: Omit & { default: boolean; }; }; @@ -1187,17 +1187,17 @@ export type InvocationSchemaType = { }; export type InvocationBaseSchemaObject = Omit< - OpenAPIV3.BaseSchemaObject, + OpenAPIV3_1.BaseSchemaObject, 'title' | 'type' | 'properties' > & InvocationSchemaExtra; export type InvocationOutputSchemaObject = Omit< - OpenAPIV3.SchemaObject, + OpenAPIV3_1.SchemaObject, 'properties' > & { - properties: OpenAPIV3.SchemaObject['properties'] & { - type: Omit & { + properties: OpenAPIV3_1.SchemaObject['properties'] & { + type: Omit & { default: string; }; } & { @@ -1205,14 +1205,18 @@ export type InvocationOutputSchemaObject = Omit< }; }; -export type InvocationFieldSchema = OpenAPIV3.SchemaObject & _InputField; +export type InvocationFieldSchema = OpenAPIV3_1.SchemaObject & _InputField; + +export type OpenAPIV3_1SchemaOrRef = + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject; export interface ArraySchemaObject extends InvocationBaseSchemaObject { - type: OpenAPIV3.ArraySchemaObjectType; - items: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject; + type: OpenAPIV3_1.ArraySchemaObjectType; + items: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject; } export interface NonArraySchemaObject extends InvocationBaseSchemaObject { - type?: OpenAPIV3.NonArraySchemaObjectType; + type?: OpenAPIV3_1.NonArraySchemaObjectType; } export type InvocationSchemaObject = ( @@ -1221,41 +1225,41 @@ export type InvocationSchemaObject = ( ) & { class: 'invocation' }; export const isSchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.SchemaObject => Boolean(obj && !('$ref' in obj)); + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.SchemaObject => Boolean(obj && !('$ref' in obj)); export const isArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type === 'array'); export const isNonArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.NonArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.NonArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type !== 'array'); export const isRefObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ReferenceObject => Boolean(obj && '$ref' in obj); + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ReferenceObject => Boolean(obj && '$ref' 
in obj); export const isInvocationSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationSchemaObject ): obj is InvocationSchemaObject => 'class' in obj && obj.class === 'invocation'; export const isInvocationOutputSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationOutputSchemaObject ): obj is InvocationOutputSchemaObject => 'class' in obj && obj.class === 'output'; export const isInvocationFieldSchema = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject ): obj is InvocationFieldSchema => !('$ref' in obj); export type InvocationEdgeExtra = { type: 'default' | 'collapsed' }; diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index 1f7fe81620..3fd44207c0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -1,5 +1,12 @@ -import { isBoolean, isInteger, isNumber, isString } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { + isArray, + isBoolean, + isInteger, + isNumber, + isString, + startCase, +} from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { COLLECTION_MAP, POLYMORPHIC_TYPES, @@ -72,6 +79,7 @@ import { T2IAdapterCollectionInputFieldTemplate, BoardInputFieldTemplate, InputFieldTemplate, + OpenAPIV3_1SchemaOrRef, } from '../types/types'; import { ControlField } from 'services/api/types'; @@ -90,7 +98,7 @@ export type BuildInputFieldArg = { * @example * refObjectToFieldType({ "$ref": "#/components/schemas/ImageField" }) --> 'ImageField' */ -export const refObjectToSchemaName = (refObject: OpenAPIV3.ReferenceObject) => +export const refObjectToSchemaName = (refObject: OpenAPIV3_1.ReferenceObject) => refObject.$ref.split('/').slice(-1)[0]; const buildIntegerInputFieldTemplate = ({ @@ -111,7 +119,10 @@ const buildIntegerInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -119,7 +130,10 @@ const buildIntegerInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -144,7 +158,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -152,7 +169,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -195,7 +215,10 @@ const buildFloatInputFieldTemplate = ({ template.maximum = 
schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -203,7 +226,10 @@ const buildFloatInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -227,7 +253,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -235,7 +264,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } return template; @@ -872,84 +904,106 @@ const buildSchedulerInputFieldTemplate = ({ }; export const getFieldType = ( - schemaObject: InvocationFieldSchema + schemaObject: OpenAPIV3_1SchemaOrRef ): string | undefined => { - if (schemaObject?.ui_type) { - return schemaObject.ui_type; - } else if (!schemaObject.type) { - // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf + if (isSchemaObject(schemaObject)) { + if (!schemaObject.type) { + // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf - if (schemaObject.allOf) { - const allOf = schemaObject.allOf; - if (allOf && allOf[0] && isRefObject(allOf[0])) { - return refObjectToSchemaName(allOf[0]); - } - } else if (schemaObject.anyOf) { - const anyOf = schemaObject.anyOf; - /** - * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: - * - an `anyOf` with two items - * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` - * - the other is a `SchemaObject` or `ReferenceObject` of type T - * - * Any other cases we ignore. 
- */ - - let firstType: string | undefined; - let secondType: string | undefined; - - if (isArraySchemaObject(anyOf[0])) { - // first is array, second is not - const first = anyOf[0].items; - const second = anyOf[1]; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + if (schemaObject.allOf) { + const allOf = schemaObject.allOf; + if (allOf && allOf[0] && isRefObject(allOf[0])) { + return refObjectToSchemaName(allOf[0]); } - } else if (isArraySchemaObject(anyOf[1])) { - // first is not array, second is - const first = anyOf[0]; - const second = anyOf[1].items; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + } else if (schemaObject.anyOf) { + // ignore null types + const anyOf = schemaObject.anyOf.filter((i) => { + if (isSchemaObject(i)) { + if (i.type === 'null') { + return false; + } + } + return true; + }); + if (anyOf.length === 1) { + if (isRefObject(anyOf[0])) { + return refObjectToSchemaName(anyOf[0]); + } else if (isSchemaObject(anyOf[0])) { + return getFieldType(anyOf[0]); + } + } + /** + * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: + * - an `anyOf` with two items + * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` + * - the other is a `SchemaObject` or `ReferenceObject` of type T + * + * Any other cases we ignore. + */ + + let firstType: string | undefined; + let secondType: string | undefined; + + if (isArraySchemaObject(anyOf[0])) { + // first is array, second is not + const first = anyOf[0].items; + const second = anyOf[1]; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } else if (isArraySchemaObject(anyOf[1])) { + // first is not array, second is + const first = anyOf[0]; + const second = anyOf[1].items; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } + if (firstType === secondType && isPolymorphicItemType(firstType)) { + return SINGLE_TO_POLYMORPHIC_MAP[firstType]; } } - if (firstType === secondType && isPolymorphicItemType(firstType)) { - return SINGLE_TO_POLYMORPHIC_MAP[firstType]; + } else if (schemaObject.enum) { + return 'enum'; + } else if (schemaObject.type) { + if (schemaObject.type === 'number') { + // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them + return 'float'; + } else if (schemaObject.type === 'array') { + const itemType = isSchemaObject(schemaObject.items) + ? 
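The null-filtering added to `getFieldType` above exists because pydantic v2 represents `Optional[X]` as an `anyOf` containing an explicit `{"type": "null"}` member (OpenAPI 3.0's `nullable: true` is gone); once the nulls are dropped, a single-member `anyOf` resolves directly to that member's type. For example:

```python
# pydantic v2 encodes Optional[...] as an anyOf with an explicit null member,
# which the frontend now filters out before resolving the field type.
from typing import Optional
from pydantic import BaseModel

class Node(BaseModel):
    workflow: Optional[str] = None

schema = Node.model_json_schema()["properties"]["workflow"]
assert schema["anyOf"] == [{"type": "string"}, {"type": "null"}]
```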
schemaObject.items.type + : refObjectToSchemaName(schemaObject.items); + + if (isArray(itemType)) { + // This is a nested array, which we don't support + return; + } + + if (isCollectionItemType(itemType)) { + return COLLECTION_MAP[itemType]; + } + + return; + } else if (!isArray(schemaObject.type)) { + return schemaObject.type; } } - } else if (schemaObject.enum) { - return 'enum'; - } else if (schemaObject.type) { - if (schemaObject.type === 'number') { - // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them - return 'float'; - } else if (schemaObject.type === 'array') { - const itemType = isSchemaObject(schemaObject.items) - ? schemaObject.items.type - : refObjectToSchemaName(schemaObject.items); - - if (isCollectionItemType(itemType)) { - return COLLECTION_MAP[itemType]; - } - - return; - } else { - return schemaObject.type; - } + } else if (isRefObject(schemaObject)) { + return refObjectToSchemaName(schemaObject); } return; }; @@ -1025,7 +1079,15 @@ export const buildInputFieldTemplate = ( name: string, fieldType: FieldType ) => { - const { input, ui_hidden, ui_component, ui_type, ui_order } = fieldSchema; + const { + input, + ui_hidden, + ui_component, + ui_type, + ui_order, + ui_choice_labels, + item_default, + } = fieldSchema; const extra = { // TODO: Can we support polymorphic inputs in the UI? @@ -1035,11 +1097,13 @@ export const buildInputFieldTemplate = ( ui_type, required: nodeSchema.required?.includes(name) ?? false, ui_order, + ui_choice_labels, + item_default, }; const baseField = { name, - title: fieldSchema.title ?? '', + title: fieldSchema.title ?? (name ? startCase(name) : ''), description: fieldSchema.description ?? '', fieldKind: 'input' as const, ...extra, diff --git a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts index 69d8d9dd4c..93cd75dd75 100644 --- a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts +++ b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts @@ -1,7 +1,7 @@ import { logger } from 'app/logging/logger'; import { parseify } from 'common/util/serialize'; -import { reduce } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { reduce, startCase } from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { AnyInvocationType } from 'services/events/types'; import { FieldType, @@ -60,7 +60,7 @@ const isNotInDenylist = (schema: InvocationSchemaObject) => !invocationDenylist.includes(schema.properties.type.default); export const parseSchema = ( - openAPI: OpenAPIV3.Document, + openAPI: OpenAPIV3_1.Document, nodesAllowlistExtra: string[] | undefined = undefined, nodesDenylistExtra: string[] | undefined = undefined ): Record => { @@ -110,7 +110,7 @@ export const parseSchema = ( return inputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -209,7 +209,7 @@ export const parseSchema = ( return outputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -222,7 +222,8 @@ export const parseSchema = ( outputsAccumulator[propertyName] = { fieldKind: 'output', name: propertyName, - title: property.title ?? '', + title: + property.title ?? (propertyName ? startCase(propertyName) : ''), description: property.description ?? 
'', type: fieldType, ui_hidden: property.ui_hidden ?? false, diff --git a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx index 9cc991335e..d441be4ecb 100644 --- a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx +++ b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx @@ -7,7 +7,7 @@ const QueueItemCard = ({ session_queue_item, label, }: { - session_queue_item?: components['schemas']['SessionQueueItem']; + session_queue_item?: components['schemas']['SessionQueueItem'] | null; label: string; }) => { return ( diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx index 6837a2e853..e5c68ba6cf 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx @@ -112,7 +112,7 @@ export default function MergeModelsPanel() { } }); - const mergeModelsInfo: MergeModelConfig = { + const mergeModelsInfo: MergeModelConfig['body'] = { model_names: models_names, merged_model_name: mergedModelName !== '' ? mergedModelName : models_names.join('-'), @@ -125,7 +125,7 @@ export default function MergeModelsPanel() { mergeModels({ base_model: baseModel, - body: mergeModelsInfo, + body: { body: mergeModelsInfo }, }) .unwrap() .then((_) => { diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 3fa606d4b6..99a5fc5f50 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -520,7 +520,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? undefined; return [ { @@ -637,7 +637,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? 
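The extra nesting in `mergeModels({ body: { body: mergeModelsInfo } })` in the `MergeModelsPanel` hunk mirrors the new server-side shape: the merge route now takes a dedicated `MergeModelsBody` model embedded as a single `Body(...)` parameter, which OpenAPI renders as `Body_merge_models = { body: MergeModelsBody }` (see the `schema.d.ts` hunk below). A rough sketch of that route shape, with abbreviated fields and a hypothetical path, not the exact InvokeAI signature:

```python
# Sketch (abbreviated fields, hypothetical route path): a pydantic model
# embedded as a single Body parameter produces a wrapping "body" key in
# the generated OpenAPI request schema.
from typing import Annotated, List
from fastapi import Body, FastAPI
from pydantic import BaseModel, ConfigDict, Field

app = FastAPI()

class MergeModelsBody(BaseModel):
    model_names: List[str] = Field(description="model name")
    merged_model_name: str = Field(description="Name of destination model")
    alpha: float = Field(default=0.5, description="Alpha weighting strength")
    model_config = ConfigDict(protected_namespaces=())

@app.put("/api/v1/models/merge/{base_model}")
async def merge_models(
    base_model: str,
    body: Annotated[MergeModelsBody, Body(description="Model configuration", embed=True)],
) -> None:
    ...
```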
undefined; return [ { type: 'ImageList', diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index e476217e6c..d4678dc03b 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,80 +5,6 @@ export type paths = { - "/api/v1/sessions/": { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - get: operations["list_sessions"]; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - post: operations["create_session"]; - }; - "/api/v1/sessions/{session_id}": { - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get: operations["get_session"]; - }; - "/api/v1/sessions/{session_id}/nodes": { - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - post: operations["add_node"]; - }; - "/api/v1/sessions/{session_id}/nodes/{node_path}": { - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - put: operations["update_node"]; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete: operations["delete_node"]; - }; - "/api/v1/sessions/{session_id}/edges": { - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - post: operations["add_edge"]; - }; - "/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}": { - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete: operations["delete_edge"]; - }; - "/api/v1/sessions/{session_id}/invoke": { - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - put: operations["invoke_session"]; - /** - * Cancel Session Invoke - * @deprecated - * @description Invokes a session - */ - delete: operations["cancel_session_invoke"]; - }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -481,18 +407,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -506,9 +432,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default add - * @enum {string} + * @constant */ type: "add"; }; @@ -551,7 +477,6 @@ export type components = { }; /** * BaseModelType - * @description An enumeration. * @enum {string} */ BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner"; @@ -566,11 +491,8 @@ export type components = { * Data * @description The batch data collection. */ - data?: components["schemas"]["BatchDatum"][][]; - /** - * Graph - * @description The graph to initialize the session with - */ + data?: components["schemas"]["BatchDatum"][][] | null; + /** @description The graph to initialize the session with */ graph: components["schemas"]["Graph"]; /** * Runs @@ -655,18 +577,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Width * @description The width of the image @@ -687,20 +609,19 @@ export type components = { */ mode?: "RGB" | "RGBA"; /** - * Color * @description The color of the image * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default blank_image - * @enum {string} + * @constant */ type: "blank_image"; }; @@ -719,27 +640,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents A - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents_a?: components["schemas"]["LatentsField"]; - /** - * Latents B - * @description Latents tensor - */ + /** @description Latents tensor */ latents_b?: components["schemas"]["LatentsField"]; /** * Alpha @@ -748,9 +663,9 @@ export type components = { */ alpha?: number; /** - * Type + * type * @default lblend - * @enum {string} + * @constant */ type: "lblend"; }; @@ -760,12 +675,12 @@ export type components = { * Board Name * @description The board's new name. */ - board_name?: string; + board_name?: string | null; /** * Cover Image Name * @description The name of the board's new cover image. */ - cover_image_name?: string; + cover_image_name?: string | null; }; /** * BoardDTO @@ -796,12 +711,12 @@ export type components = { * Deleted At * @description The deleted timestamp of the board. */ - deleted_at?: string; + deleted_at?: string | null; /** * Cover Image Name * @description The name of the board's cover image. */ - cover_image_name?: string; + cover_image_name: string | null; /** * Image Count * @description The number of images in the board. 
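Two mechanical patterns account for most of this `schema.d.ts` churn: optional invocation fields become `T | null` (the `anyOf`-with-null encoding shown earlier), and fixed `Literal` discriminators are emitted as `const` (rendered `@constant`) instead of single-member enums. For instance:

```python
# A single-valued Literal field now emits "const" rather than a one-member
# "enum", which openapi-typescript annotates as "@constant".
from typing import Literal
from pydantic import BaseModel

class AddInvocation(BaseModel):  # stand-in for the real "add" invocation
    type: Literal["add"] = "add"

schema = AddInvocation.model_json_schema()["properties"]["type"]
assert schema["const"] == "add"
assert "enum" not in schema
```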
@@ -872,14 +787,11 @@ export type components = { * Board Id * @description The board from which image should be downloaded from */ - board_id?: string; + board_id?: string | null; }; /** Body_enqueue_batch */ Body_enqueue_batch: { - /** - * Batch - * @description Batch to process - */ + /** @description Batch to process */ batch: components["schemas"]["Batch"]; /** * Prepend @@ -890,10 +802,7 @@ export type components = { }; /** Body_enqueue_graph */ Body_enqueue_graph: { - /** - * Graph - * @description The graph to enqueue - */ + /** @description The graph to enqueue */ graph: components["schemas"]["Graph"]; /** * Prepend @@ -912,41 +821,13 @@ export type components = { /** * Prediction Type * @description Prediction type for SDv2 checkpoints and rare SDv1 checkpoints - * @enum {string} */ - prediction_type?: "v_prediction" | "epsilon" | "sample"; + prediction_type?: ("v_prediction" | "epsilon" | "sample") | null; }; /** Body_merge_models */ Body_merge_models: { - /** - * Model Names - * @description model name - */ - model_names: string[]; - /** - * Merged Model Name - * @description Name of destination model - */ - merged_model_name: string; - /** - * Alpha - * @description Alpha weighting strength to apply to 2d and 3d models - * @default 0.5 - */ - alpha?: number; - /** @description Interpolation method */ - interp: components["schemas"]["MergeInterpolationMethod"]; - /** - * Force - * @description Force merging of models created with different versions of diffusers - * @default false - */ - force?: boolean; - /** - * Merge Dest Directory - * @description Save the merged model to the designated directory (with 'merged_model_name' appended) - */ - merge_dest_directory?: string; + /** @description Model configuration */ + body: components["schemas"]["MergeModelsBody"]; }; /** Body_parse_dynamicprompts */ Body_parse_dynamicprompts: { @@ -1023,27 +904,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of boolean values */ collection?: boolean[]; /** - * Type + * type * @default boolean_collection - * @enum {string} + * @constant */ type: "boolean_collection"; }; @@ -1058,9 +939,9 @@ export type components = { */ collection: boolean[]; /** - * Type + * type * @default boolean_collection_output - * @enum {string} + * @constant */ type: "boolean_collection_output"; }; @@ -1079,18 +960,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The boolean value @@ -1098,9 +979,9 @@ export type components = { */ value?: boolean; /** - * Type + * type * @default boolean - * @enum {string} + * @constant */ type: "boolean"; }; @@ -1115,9 +996,9 @@ export type components = { */ value: boolean; /** - * Type + * type * @default boolean_output - * @enum {string} + * @constant */ type: "boolean_output"; }; @@ -1128,19 +1009,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default clip_vision + * @constant */ model_type: "clip_vision"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** CLIPVisionModelField */ CLIPVisionModelField: { @@ -1167,27 +1049,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_cv2 - * @enum {string} + * @constant */ type: "infill_cv2"; }; @@ -1217,29 +1096,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default canny_image_processor - * @enum {string} - */ - type: "canny_image_processor"; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -1252,6 +1122,12 @@ export type components = { * @default 200 */ high_threshold?: number; + /** + * type + * @default canny_image_processor + * @constant + */ + type: "canny_image_processor"; }; /** * ClearResult @@ -1266,15 +1142,9 @@ export type components = { }; /** ClipField */ ClipField: { - /** - * Tokenizer - * @description Info to load tokenizer submodel - */ + /** @description Info to load tokenizer submodel */ tokenizer: components["schemas"]["ModelInfo"]; - /** - * Text Encoder - * @description Info to load text_encoder submodel - */ + /** @description Info to load text_encoder submodel */ text_encoder: components["schemas"]["ModelInfo"]; /** * Skipped Layers @@ -1302,18 +1172,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count @@ -1326,9 +1196,9 @@ export type components = { */ skipped_layers?: number; /** - * Type + * type * @default clip_skip - * @enum {string} + * @constant */ type: "clip_skip"; }; @@ -1341,11 +1211,11 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default clip_skip_output - * @enum {string} + * @constant */ type: "clip_skip_output"; }; @@ -1364,18 +1234,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection Item * @description The item to collect (all inputs must be of the same type) @@ -1387,18 +1257,13 @@ export type components = { */ collection?: unknown[]; /** - * Type + * type * @default collect - * @enum {string} + * @constant */ type: "collect"; }; - /** - * CollectInvocationOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** CollectInvocationOutput */ CollectInvocationOutput: { /** * Collection @@ -1406,9 +1271,9 @@ export type components = { */ collection: unknown[]; /** - * Type + * type * @default collect_output - * @enum {string} + * @constant */ type: "collect_output"; }; @@ -1423,9 +1288,9 @@ export type components = { */ collection: components["schemas"]["ColorField"][]; /** - * Type + * type * @default color_collection_output - * @enum {string} + * @constant */ type: "color_collection_output"; }; @@ -1445,33 +1310,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to color-correct - */ + use_cache?: boolean | null; + /** @description The image to color-correct */ image?: components["schemas"]["ImageField"]; - /** - * Reference - * @description Reference image for color-correction - */ + /** @description Reference image for color-correction */ reference?: components["schemas"]["ImageField"]; - /** - * Mask - * @description Mask to use when applying color-correction - */ - mask?: components["schemas"]["ImageField"]; + /** @description Mask to use when applying color-correction */ + mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur Radius * @description Mask blur radius @@ -1479,9 +1335,9 @@ export type components = { */ mask_blur_radius?: number; /** - * Type + * type * @default color_correct - * @enum {string} + * @constant */ type: "color_correct"; }; @@ -1526,33 +1382,32 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** - * Color * @description The color value * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default color - * @enum {string} + * @constant */ type: "color"; }; @@ -1571,50 +1426,44 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default color_map_image_processor - * @enum {string} - */ - type: "color_map_image_processor"; /** * Color Map Tile Size * @description Tile size * @default 64 */ color_map_tile_size?: number; + /** + * type + * @default color_map_image_processor + * @constant + */ + type: "color_map_image_processor"; }; /** * ColorOutput * @description Base class for nodes that output a single color */ ColorOutput: { - /** - * Color - * @description The output color - */ + /** @description The output color */ color: components["schemas"]["ColorField"]; /** - * Type + * type * @default color_output - * @enum {string} + * @constant */ type: "color_output"; }; @@ -1633,35 +1482,35 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor * @default */ prompt?: string; - /** - * Type - * @default compel - * @enum {string} - */ - type: "compel"; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; + /** + * type + * @default compel + * @constant + */ + type: "compel"; }; /** * Conditioning Collection Primitive @@ -1678,27 +1527,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of conditioning tensors */ collection?: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection - * @enum {string} + * @constant */ type: "conditioning_collection"; }; @@ -1713,9 +1562,9 @@ export type components = { */ collection: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection_output - * @enum {string} + * @constant */ type: "conditioning_collection_output"; }; @@ -1745,27 +1594,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Conditioning - * @description Conditioning tensor - */ + use_cache?: boolean | null; + /** @description Conditioning tensor */ conditioning?: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning - * @enum {string} + * @constant */ type: "conditioning"; }; @@ -1774,15 +1620,12 @@ export type components = { * @description Base class for nodes that output a single conditioning tensor */ ConditioningOutput: { - /** - * Conditioning - * @description Conditioning tensor - */ + /** @description Conditioning tensor */ conditioning: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning_output - * @enum {string} + * @constant */ type: "conditioning_output"; }; @@ -1801,29 +1644,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default content_shuffle_image_processor - * @enum {string} - */ - type: "content_shuffle_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -1854,18 +1688,18 @@ export type components = { * @default 256 */ f?: number; + /** + * type + * @default content_shuffle_image_processor + * @constant + */ + type: "content_shuffle_image_processor"; }; /** ControlField */ ControlField: { - /** - * Image - * @description The control image - */ + /** @description The control image */ image: components["schemas"]["ImageField"]; - /** - * Control Model - * @description The ControlNet model to use - */ + /** @description The ControlNet model to use */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1915,27 +1749,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The control image - */ + use_cache?: boolean | null; + /** @description The control image */ image?: components["schemas"]["ImageField"]; - /** - * Control Model - * @description ControlNet model to load - */ + /** @description ControlNet model to load */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1970,9 +1798,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default controlnet - * @enum {string} + * @constant */ type: "controlnet"; }; @@ -1983,19 +1811,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Config */ config: string; }; @@ -2006,19 +1835,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * ControlNetModelField @@ -2038,15 +1868,12 @@ export type components = { * @description node output for ControlNet info */ ControlOutput: { - /** - * Control - * @description ControlNet(s) to apply - */ + /** @description ControlNet(s) to apply */ control: components["schemas"]["ControlField"]; /** - * Type + * type * @default control_output - * @enum {string} + * @constant */ type: "control_output"; }; @@ -2065,147 +1892,138 @@ export type components = { * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Created By * @description The name of the creator of the image */ - created_by?: string; + created_by: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for 
inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; + init_image?: string | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; }; /** * Create Denoise Mask @@ -2222,32 +2040,23 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Vae - * @description VAE - */ + use_cache?: boolean | null; + /** @description VAE */ vae?: components["schemas"]["VaeField"]; - /** - * Image - * @description Image which will be masked - */ - image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ + /** @description Image which will be masked */ + image?: components["schemas"]["ImageField"] | null; + /** @description The mask to use when pasting */ mask?: components["schemas"]["ImageField"]; /** * Tiled @@ -2258,21 +2067,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default create_denoise_mask - * @enum {string} + * @constant */ type: "create_denoise_mask"; }; - /** - * CursorPaginatedResults[SessionQueueItemDTO] - * @description Cursor-paginated results - * Generic must be a Pydantic model - */ + /** CursorPaginatedResults[SessionQueueItemDTO] */ CursorPaginatedResults_SessionQueueItemDTO_: { /** * Limit @@ -2305,32 +2110,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to inpaint - */ + use_cache?: boolean | null; + /** @description The image to inpaint */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when inpainting - */ + /** @description The mask to use when inpainting */ mask?: components["schemas"]["ImageField"]; /** - * Type + * type * @default cv_inpaint - * @enum {string} + * @constant */ type: "cv_inpaint"; }; @@ -2372,23 +2171,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ + positive_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Negative conditioning tensor */ + negative_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Noise tensor */ + noise?: components["schemas"]["LatentsField"] | null; /** * Steps * @description Number of steps to run @@ -2420,49 +2220,33 @@ export type components = { * @enum {string} */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** Control */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; - /** - * IP-Adapter - * @description IP-Adapter to apply - */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][]; - /** - * T2I-Adapter - * @description T2I-Adapter(s) to apply - */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][]; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Denoise Mask - * @description The mask to use for the operation - */ - denoise_mask?: components["schemas"]["DenoiseMaskField"]; - /** - * Type - * @default denoise_latents - * @enum {string} - */ - type: "denoise_latents"; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; /** * UNet * @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; + /** Control */ + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + /** + * IP-Adapter + * @description IP-Adapter to apply + */ + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + /** + * T2I-Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"] | null; + /** @description The mask to use for the operation */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * type + * @default denoise_latents + * @constant + */ + type: "denoise_latents"; }; /** * DenoiseMaskField @@ -2478,22 +2262,19 @@ export type components = { * Masked Latents Name * @description The name of the masked image latents */ - masked_latents_name?: string; + masked_latents_name: string | null; }; /** * DenoiseMaskOutput * @description Base class for nodes that output a single image */ DenoiseMaskOutput: { - /** - * Denoise Mask - * @description Mask for denoise model run - */ + /** @description 
Mask for denoise model run */ denoise_mask: components["schemas"]["DenoiseMaskField"]; /** - * Type + * type * @default denoise_mask_output - * @enum {string} + * @constant */ type: "denoise_mask_output"; }; @@ -2512,18 +2293,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -2537,9 +2318,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default div - * @enum {string} + * @constant */ type: "div"; }; @@ -2558,18 +2339,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description The prompt to parse with dynamicprompts @@ -2588,9 +2369,9 @@ export type components = { */ combinatorial?: boolean; /** - * Type + * type * @default dynamic_prompt - * @enum {string} + * @constant */ type: "dynamic_prompt"; }; @@ -2599,7 +2380,7 @@ export type components = { /** Prompts */ prompts: string[]; /** Error */ - error?: string; + error?: string | null; }; /** * Upscale (RealESRGAN) @@ -2616,22 +2397,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
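One distinction that is easy to miss in `DenoiseMaskField` above: `masked_latents_name: string | null` is now required-but-nullable (the key must be present, its value may be null), unlike the `field?: T | null` pattern used elsewhere, which is optional as well as nullable. A sketch, assuming `mask_name` is the field's only other member (it is elided in this hunk):

const mask: components["schemas"]["DenoiseMaskField"] = {
  mask_name: "mask-abc",     // assumed from context; not shown in this hunk
  masked_latents_name: null, // key is mandatory, null is a legal value
};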
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The input image - */ + use_cache?: boolean | null; + /** @description The input image */ image?: components["schemas"]["ImageField"]; /** * Model Name @@ -2647,23 +2425,17 @@ export type components = { */ tile_size?: number; /** - * Type + * type * @default esrgan - * @enum {string} + * @constant */ type: "esrgan"; }; /** Edge */ Edge: { - /** - * Source - * @description The connection for the edge's from node and field - */ + /** @description The connection for the edge's from node and field */ source: components["schemas"]["EdgeConnection"]; - /** - * Destination - * @description The connection for the edge's to node and field - */ + /** @description The connection for the edge's to node and field */ destination: components["schemas"]["EdgeConnection"]; }; /** EdgeConnection */ @@ -2696,10 +2468,7 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority @@ -2719,20 +2488,14 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority * @description The priority of the enqueued batch */ priority: number; - /** - * Queue Item - * @description The queue item that was enqueued - */ + /** @description The queue item that was enqueued */ queue_item: components["schemas"]["SessionQueueItemDTO"]; }; /** @@ -2750,22 +2513,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Minimum Confidence @@ -2780,9 +2540,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_identifier - * @enum {string} + * @constant */ type: "face_identifier"; }; @@ -2801,22 +2561,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
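`Edge` is just a pair of `EdgeConnection`s. The connection fields are elided in this hunk, so the following wiring example assumes `EdgeConnection` carries a `node_id` and a `field`, as the descriptions above suggest:

// Hypothetical edge routing one node's "noise" output into another's input.
const edge: components["schemas"]["Edge"] = {
  source: { node_id: "noise_node", field: "noise" },
  destination: { node_id: "denoise_node", field: "noise" },
};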
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Face Ids @@ -2855,9 +2612,9 @@ export type components = { */ invert_mask?: boolean; /** - * Type + * type * @default face_mask_detection - * @enum {string} + * @constant */ type: "face_mask_detection"; }; @@ -2866,10 +2623,7 @@ export type components = { * @description Base class for FaceMask output */ FaceMaskOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2882,15 +2636,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_mask_output - * @enum {string} + * @constant */ type: "face_mask_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; }; /** @@ -2908,22 +2659,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image for face detection - */ + use_cache?: boolean | null; + /** @description Image for face detection */ image?: components["schemas"]["ImageField"]; /** * Face Id @@ -2962,9 +2710,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_off - * @enum {string} + * @constant */ type: "face_off"; }; @@ -2973,10 +2721,7 @@ export type components = { * @description Base class for FaceOff Output */ FaceOffOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2989,15 +2734,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_off_output - * @enum {string} + * @constant */ type: "face_off_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; /** * X @@ -3025,27 +2767,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of float values */ collection?: number[]; /** - * Type + * type * @default float_collection - * @enum {string} + * @constant */ type: "float_collection"; }; @@ -3060,9 +2802,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default float_collection_output - * @enum {string} + * @constant */ type: "float_collection_output"; }; @@ -3081,18 +2823,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -3100,9 +2842,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default float - * @enum {string} + * @constant */ type: "float"; }; @@ -3121,18 +2863,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The first value of the range @@ -3152,9 +2894,9 @@ export type components = { */ steps?: number; /** - * Type + * type * @default float_range - * @enum {string} + * @constant */ type: "float_range"; }; @@ -3173,18 +2915,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -3205,9 +2947,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default float_math - * @enum {string} + * @constant */ type: "float_math"; }; @@ -3222,9 +2964,9 @@ export type components = { */ value: number; /** - * Type + * type * @default float_output - * @enum {string} + * @constant */ type: "float_output"; }; @@ -3243,18 +2985,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
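Every invocation in this file repeats the same four header fields (`id`, `is_intermediate`, `workflow`, `use_cache`). The generator inlines them into each schema, but a consumer can still factor the shared shape out structurally; a minimal sketch (this alias is a convenience, not part of the generated file):

type BaseInvocationFields = {
  id: string;
  is_intermediate?: boolean | null;
  workflow?: string | null;
  use_cache?: boolean | null;
};

// Structural typing makes any generated invocation assignable to it:
const header: BaseInvocationFields =
  {} as components["schemas"]["FloatInvocation"];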
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The value to round @@ -3275,9 +3017,9 @@ export type components = { */ method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; /** - * Type + * type * @default float_to_int - * @enum {string} + * @constant */ type: "float_to_int"; }; @@ -3293,7 +3035,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | 
components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; }; /** * Edges @@ -3311,15 +3053,9 @@ export type components = { * @description The id of the execution state */ id: string; - /** - * Graph - * @description The graph being executed - */ + /** @description The graph being executed */ graph: components["schemas"]["Graph"]; - /** - * Execution Graph - * @description The expanded graph of activated and executed nodes - */ + /** @description The expanded graph of activated and executed nodes */ execution_graph: components["schemas"]["Graph"]; /** * Executed @@ -3336,7 +3072,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; + [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; }; /** * Errors @@ -3375,41 +3111,33 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Graph - * @description The graph to run - */ + use_cache?: boolean | null; + /** @description The graph to run */ graph?: components["schemas"]["Graph"]; /** - * Type + * type * @default graph - * @enum {string} + * @constant */ type: "graph"; }; - /** - * GraphInvocationOutput - * @description Base class for all invocation outputs. 
- * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** GraphInvocationOutput */ GraphInvocationOutput: { /** - * Type + * type * @default graph_output - * @enum {string} + * @constant */ type: "graph_output"; }; @@ -3433,29 +3161,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default hed_image_processor - * @enum {string} - */ - type: "hed_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -3474,23 +3193,20 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default hed_image_processor + * @constant + */ + type: "hed_image_processor"; }; /** IPAdapterField */ IPAdapterField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. */ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; - /** - * Image Encoder Model - * @description The name of the CLIP image encoder model. - */ + /** @description The name of the CLIP image encoder model. */ image_encoder_model: components["schemas"]["CLIPVisionModelField"]; /** * Weight @@ -3526,22 +3242,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. */ image?: components["schemas"]["ImageField"]; /** * IP-Adapter Model @@ -3567,23 +3280,17 @@ export type components = { */ end_step_percent?: number; /** - * Type + * type * @default ip_adapter - * @enum {string} + * @constant */ type: "ip_adapter"; }; /** IPAdapterMetadataField */ IPAdapterMetadataField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. 
*/ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; /** * Weight @@ -3620,26 +3327,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default ip_adapter + * @constant */ model_type: "ip_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "invokeai"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; - /** - * IPAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** IPAdapterOutput */ IPAdapterOutput: { /** * IP-Adapter @@ -3647,9 +3350,9 @@ export type components = { */ ip_adapter: components["schemas"]["IPAdapterField"]; /** - * Type + * type * @default ip_adapter_output - * @enum {string} + * @constant */ type: "ip_adapter_output"; }; @@ -3668,22 +3371,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to blur - */ + use_cache?: boolean | null; + /** @description The image to blur */ image?: components["schemas"]["ImageField"]; /** * Radius @@ -3699,9 +3399,9 @@ export type components = { */ blur_type?: "gaussian" | "box"; /** - * Type + * type * @default img_blur - * @enum {string} + * @constant */ type: "img_blur"; }; @@ -3732,22 +3432,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to get the channel from - */ + use_cache?: boolean | null; + /** @description The image to get the channel from */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3757,9 +3454,9 @@ export type components = { */ channel?: "A" | "R" | "G" | "B"; /** - * Type + * type * @default img_chan - * @enum {string} + * @constant */ type: "img_chan"; }; @@ -3778,22 +3475,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3814,9 +3508,9 @@ export type components = { */ invert_channel?: boolean; /** - * Type + * type * @default img_channel_multiply - * @enum {string} + * @constant */ type: "img_channel_multiply"; }; @@ -3835,22 +3529,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3865,9 +3556,9 @@ export type components = { */ offset?: number; /** - * Type + * type * @default img_channel_offset - * @enum {string} + * @constant */ type: "img_channel_offset"; }; @@ -3886,27 +3577,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of image values */ collection?: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection - * @enum {string} + * @constant */ type: "image_collection"; }; @@ -3921,9 +3612,9 @@ export type components = { */ collection: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection_output - * @enum {string} + * @constant */ type: "image_collection_output"; }; @@ -3942,22 +3633,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to convert - */ + use_cache?: boolean | null; + /** @description The image to convert */ image?: components["schemas"]["ImageField"]; /** * Mode @@ -3967,9 +3655,9 @@ export type components = { */ mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; /** - * Type + * type * @default img_conv - * @enum {string} + * @constant */ type: "img_conv"; }; @@ -3988,22 +3676,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to crop - */ + use_cache?: boolean | null; + /** @description The image to crop */ image?: components["schemas"]["ImageField"]; /** * X @@ -4030,9 +3715,9 @@ export type components = { */ height?: number; /** - * Type + * type * @default img_crop - * @enum {string} + * @constant */ type: "img_crop"; }; @@ -4084,7 +3769,7 @@ export type components = { * Deleted At * @description The deleted timestamp of the image. */ - deleted_at?: string; + deleted_at?: string | null; /** * Is Intermediate * @description Whether this is an intermediate image. @@ -4094,12 +3779,12 @@ export type components = { * Session Id * @description The session ID that generated this image, if it is a generated image. */ - session_id?: string; + session_id?: string | null; /** * Node Id * @description The node ID that generated this image, if it is a generated image. */ - node_id?: string; + node_id?: string | null; /** * Starred * @description Whether this image is starred. @@ -4109,7 +3794,7 @@ export type components = { * Board Id * @description The id of the board the image belongs to, if one exists. */ - board_id?: string; + board_id?: string | null; }; /** * ImageField @@ -4137,22 +3822,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Hue @@ -4161,9 +3843,9 @@ export type components = { */ hue?: number; /** - * Type + * type * @default img_hue_adjust - * @enum {string} + * @constant */ type: "img_hue_adjust"; }; @@ -4182,22 +3864,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4212,9 +3891,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_ilerp - * @enum {string} + * @constant */ type: "img_ilerp"; }; @@ -4233,27 +3912,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to load - */ + use_cache?: boolean | null; + /** @description The image to load */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default image - * @enum {string} + * @constant */ type: "image"; }; @@ -4272,22 +3948,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4302,9 +3975,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_lerp - * @enum {string} + * @constant */ type: "img_lerp"; }; @@ -4317,12 +3990,12 @@ export type components = { * Metadata * @description The image's core metadata, if it was created in the Linear or Canvas UI */ - metadata?: Record; + metadata?: Record | null; /** * Graph * @description The graph that created the image */ - graph?: Record; + graph?: Record | null; }; /** * Multiply Images @@ -4339,32 +4012,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image1 - * @description The first image to multiply - */ + use_cache?: boolean | null; + /** @description The first image to multiply */ image1?: components["schemas"]["ImageField"]; - /** - * Image2 - * @description The second image to multiply - */ + /** @description The second image to multiply */ image2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default img_mul - * @enum {string} + * @constant */ type: "img_mul"; }; @@ -4383,44 +4050,35 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description The image to check */ + image?: components["schemas"]["ImageField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_nsfw - * @enum {string} + * @constant */ type: "img_nsfw"; - /** - * Image - * @description The image to check - */ - image?: components["schemas"]["ImageField"]; }; /** * ImageOutput * @description Base class for nodes that output a single image */ ImageOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -4433,9 +4091,9 @@ export type components = { */ height: number; /** - * Type + * type * @default image_output - * @enum {string} + * @constant */ type: "image_output"; }; @@ -4454,33 +4112,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Base Image - * @description The base image - */ + use_cache?: boolean | null; + /** @description The base image */ base_image?: components["schemas"]["ImageField"]; - /** - * Image - * @description The image to paste - */ + /** @description The image to paste */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ - mask?: components["schemas"]["ImageField"]; + /** @description The mask to use when pasting */ + mask?: components["schemas"]["ImageField"] | null; /** * X * @description The left x coordinate at which to paste the image @@ -4500,51 +4149,12 @@ export type components = { */ crop?: boolean; /** - * Type + * type * @default img_paste - * @enum {string} + * @constant */ type: "img_paste"; }; - /** - * Base Image Processor - * @description Base class for invocations that preprocess images for ControlNet - */ - ImageProcessorInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ - image?: components["schemas"]["ImageField"]; - /** - * Type - * @default image_processor - * @enum {string} - */ - type: "image_processor"; - }; /** * ImageRecordChanges * @description A set of changes to apply to an image record. 
@@ -4557,22 +4167,23 @@ export type components = { */ ImageRecordChanges: { /** @description The image's new category. */ - image_category?: components["schemas"]["ImageCategory"]; + image_category?: components["schemas"]["ImageCategory"] | null; /** * Session Id * @description The image's new session ID. */ - session_id?: string; + session_id?: string | null; /** * Is Intermediate * @description The image's new `is_intermediate` flag. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Starred * @description The image's new `starred` state */ - starred?: boolean; + starred?: boolean | null; + [key: string]: unknown; }; /** * Resize Image @@ -4589,22 +4200,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to resize - */ + use_cache?: boolean | null; + /** @description The image to resize */ image?: components["schemas"]["ImageField"]; /** * Width @@ -4625,15 +4233,12 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_resize - * @enum {string} + * @constant */ type: "img_resize"; }; @@ -4652,22 +4257,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to scale - */ + use_cache?: boolean | null; + /** @description The image to scale */ image?: components["schemas"]["ImageField"]; /** * Scale Factor @@ -4683,9 +4285,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default img_scale - * @enum {string} + * @constant */ type: "img_scale"; }; @@ -4704,27 +4306,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
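`ImageRecordChanges` also gains an index signature (`[key: string]: unknown`), presumably from the model now being configured to allow extra fields, so unrecognized keys type-check on the client:

const changes: components["schemas"]["ImageRecordChanges"] = {
  starred: true,
  // Allowed by the new index signature; the value is typed as `unknown`,
  // and whether the server honors it is up to the API.
  some_future_field: "ok",
};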
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to encode - */ + use_cache?: boolean | null; + /** @description The image to encode */ image?: components["schemas"]["ImageField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; /** * Tiled @@ -4735,13 +4331,13 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default i2l - * @enum {string} + * @constant */ type: "i2l"; }; @@ -4781,22 +4377,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to check - */ + use_cache?: boolean | null; + /** @description The image to check */ image?: components["schemas"]["ImageField"]; /** * Text @@ -4804,15 +4397,12 @@ export type components = { * @default InvokeAI */ text?: string; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_watermark - * @enum {string} + * @constant */ type: "img_watermark"; }; @@ -4822,7 +4412,7 @@ export type components = { * Response * @description If defined, the message to display to the user when images begin downloading */ - response?: string; + response: string | null; }; /** ImagesUpdatedFromListResult */ ImagesUpdatedFromListResult: { @@ -4847,38 +4437,34 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Color * @description The color to use to infill * @default { - * "r": 127, - * "g": 127, + * "a": 255, * "b": 127, - * "a": 255 + * "g": 127, + * "r": 127 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default infill_rgba - * @enum {string} + * @constant */ type: "infill_rgba"; }; @@ -4897,22 +4483,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
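In the `infill_rgba` hunk above, the `@default` color object only has its keys re-serialized in alphabetical order (`a, b, g, r`); JSON object key order carries no meaning, so the default value itself is unchanged:

// Both spellings denote the same default color value.
const oldOrder = { r: 127, g: 127, b: 127, a: 255 };
const newOrder = { a: 255, b: 127, g: 127, r: 127 };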
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Downscale @@ -4928,9 +4511,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default infill_patchmatch - * @enum {string} + * @constant */ type: "infill_patchmatch"; }; @@ -4949,22 +4532,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Tile Size @@ -4978,9 +4558,9 @@ export type components = { */ seed?: number; /** - * Type + * type * @default infill_tile - * @enum {string} + * @constant */ type: "infill_tile"; }; @@ -4999,27 +4579,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of integer values */ collection?: number[]; /** - * Type + * type * @default integer_collection - * @enum {string} + * @constant */ type: "integer_collection"; }; @@ -5034,9 +4614,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default integer_collection_output - * @enum {string} + * @constant */ type: "integer_collection_output"; }; @@ -5055,18 +4635,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The integer value @@ -5074,9 +4654,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default integer - * @enum {string} + * @constant */ type: "integer"; }; @@ -5095,18 +4675,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -5127,9 +4707,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default integer_math - * @enum {string} + * @constant */ type: "integer_math"; }; @@ -5144,9 +4724,9 @@ export type components = { */ value: number; /** - * Type + * type * @default integer_output - * @enum {string} + * @constant */ type: "integer_output"; }; @@ -5193,18 +4773,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The list of items to iterate over @@ -5217,9 +4797,9 @@ export type components = { */ index?: number; /** - * Type + * type * @default iterate - * @enum {string} + * @constant */ type: "iterate"; }; @@ -5232,11 +4812,11 @@ export type components = { * Collection Item * @description The item being iterated over */ - item?: unknown; + item: unknown; /** - * Type + * type * @default iterate_output - * @enum {string} + * @constant */ type: "iterate_output"; }; @@ -5255,27 +4835,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_lama - * @enum {string} + * @constant */ type: "infill_lama"; }; @@ -5294,27 +4871,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of latents tensors */ collection?: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection - * @enum {string} + * @constant */ type: "latents_collection"; }; @@ -5329,9 +4906,9 @@ export type components = { */ collection: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection_output - * @enum {string} + * @constant */ type: "latents_collection_output"; }; @@ -5349,7 +4926,7 @@ export type components = { * Seed * @description Seed used to generate this latents tensor */ - seed?: number; + seed?: number | null; }; /** * Latents Primitive * @description A latents tensor primitive value */ @@ -5366,27 +4943,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description The latents tensor - */ + use_cache?: boolean | null; + /** @description The latents tensor */ latents?: components["schemas"]["LatentsField"]; /** - * Type + * type * @default latents - * @enum {string} + * @constant */ type: "latents"; }; @@ -5395,10 +4969,7 @@ export type components = { * @description Base class for nodes that output a single latents tensor */ LatentsOutput: { - /** - * Latents - * @description Latents tensor - */ + /** @description Latents tensor */ latents: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) */ width: number; /** * Height * @description Height of output (px) */ height: number; /** - * Type + * type * @default latents_output - * @enum {string} + * @constant */ type: "latents_output"; }; @@ -5432,18 +5003,22 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"]; + /** @description VAE */ + vae?: components["schemas"]["VaeField"]; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -5453,30 +5028,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i - * @enum {string} + * @constant */ type: "l2i"; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ - vae?: components["schemas"]["VaeField"]; }; /** * Leres (Depth) Processor @@ -5493,29 +5055,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default leres_image_processor - * @enum {string} - */ - type: "leres_image_processor"; /** * Thr A * @description Leres parameter `thr_a` @@ -5546,6 +5099,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default leres_image_processor + * @constant + */ + type: "leres_image_processor"; }; /** * Lineart Anime Processor @@ -5562,29 +5121,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_anime_image_processor - * @enum {string} - */ - type: "lineart_anime_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5597,6 +5147,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default lineart_anime_image_processor + * @constant + */ + type: "lineart_anime_image_processor"; }; /** * Lineart Processor @@ -5613,29 +5169,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_image_processor - * @enum {string} - */ - type: "lineart_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5654,16 +5201,19 @@ export type components = { * @default false */ coarse?: boolean; + /** + * type + * @default lineart_image_processor + * @constant + */ + type: "lineart_image_processor"; }; /** * LoRAMetadataField * @description LoRA metadata for an image generated in InvokeAI. */ LoRAMetadataField: { - /** - * Lora - * @description The LoRA model - */ + /** @description The LoRA model */ lora: components["schemas"]["LoRAModelField"]; /** * Weight @@ -5678,15 +5228,16 @@ base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default lora + * @constant */ model_type: "lora"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["LoRAModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * LoRAModelField * @description LoRA model field */ @@ -5703,13 +5254,11 @@ }; /** * LoRAModelFormat - * @description An enumeration. * @enum {string} */ LoRAModelFormat: "lycoris" | "diffusers"; /** * LogLevel - * @description An enumeration. * @enum {integer} */ LogLevel: 0 | 10 | 20 | 30 | 40 | 50; @@ -5725,7 +5274,7 @@ /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; /** * Weight * @description The LoRA's weight to use when applying it to the model */ weight: number; @@ -5747,18 +5296,18 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load */ lora: components["schemas"]["LoRAModelField"]; /** * Weight * @description The weight at which the LoRA is applied to each model * @default 0.75 */ weight?: number; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader - * @enum {string} + * @constant */ type: "lora_loader"; }; @@ -5796,16 +5345,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader_output - * @enum {string} + * @constant */ type: "lora_loader_output"; }; @@ -5839,27 +5388,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default main_model_loader - * @enum {string} + * @constant */ type: "main_model_loader"; }; @@ -5878,32 +5424,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Mask1 - * @description The first mask to combine - */ + use_cache?: boolean | null; + /** @description The first mask to combine */ mask1?: components["schemas"]["ImageField"]; - /** - * Mask2 - * @description The second mask to combine - */ + /** @description The second mask to combine */ mask2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default mask_combine - * @enum {string} + * @constant */ type: "mask_combine"; }; @@ -5922,22 +5462,19 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to apply the mask to - */ + use_cache?: boolean | null; + /** @description The image to apply the mask to */ image?: components["schemas"]["ImageField"]; /** * Edge Size @@ -5960,9 +5497,9 @@ export type components = { */ high_threshold?: number; /** - * Type + * type * @default mask_edge - * @enum {string} + * @constant */ type: "mask_edge"; }; @@ -5981,22 +5518,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to create the mask from - */ + use_cache?: boolean | null; + /** @description The image to create the mask from */ image?: components["schemas"]["ImageField"]; /** * Invert @@ -6005,9 +5539,9 @@ export type components = { */ invert?: boolean; /** - * Type + * type * @default tomask - * @enum {string} + * @constant */ type: "tomask"; }; @@ -6026,29 +5560,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mediapipe_face_processor - * @enum {string} - */ - type: "mediapipe_face_processor"; /** * Max Faces * @description Maximum number of faces to detect @@ -6061,13 +5586,50 @@ export type components = { * @default 0.5 */ min_confidence?: number; + /** + * type + * @default mediapipe_face_processor + * @constant + */ + type: "mediapipe_face_processor"; }; /** * MergeInterpolationMethod - * @description An enumeration. 
* @enum {string} */ MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; + /** MergeModelsBody */ + MergeModelsBody: { + /** + * Model Names + * @description model name + */ + model_names: string[]; + /** + * Merged Model Name + * @description Name of destination model + */ + merged_model_name: string | null; + /** + * Alpha + * @description Alpha weighting strength to apply to 2d and 3d models + * @default 0.5 + */ + alpha?: number | null; + /** @description Interpolation method */ + interp: components["schemas"]["MergeInterpolationMethod"] | null; + /** + * Force + * @description Force merging of models created with different versions of diffusers + * @default false + */ + force?: boolean | null; + /** + * Merge Dest Directory + * @description Save the merged model to the designated directory (with 'merged_model_name' appended) + */ + merge_dest_directory?: string | null; + }; /** * Metadata Accumulator * @description Outputs a Core Metadata Object @@ -6083,177 +5645,168 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The T2I Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; +
t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + init_image?: string | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Hrf Width * @description The high resolution fix height and width multiplier. */ - hrf_width?: number; + hrf_width?: number | null; /** * Hrf Height * @description The high resolution fix height and width multiplier. */ - hrf_height?: number; + hrf_height?: number | null; /** * Hrf Strength * @description The high resolution fix img2img strength used in the upscale pass. */ - hrf_strength?: number; + hrf_strength?: number | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; /** - * Type + * type * @default metadata_accumulator - * @enum {string} + * @constant */ type: "metadata_accumulator"; }; @@ -6262,15 +5815,12 @@ export type components = { * @description The output of the MetadataAccumulator node */ MetadataAccumulatorOutput: { - /** - * Metadata - * @description The core metadata for the image - */ + /** @description The core metadata for the image */ metadata: components["schemas"]["CoreMetadata"]; /** - * Type + * type * @default metadata_accumulator_output - * @enum {string} + * @constant */ type: "metadata_accumulator_output"; }; @@ -6289,29 +5839,20 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default midas_depth_image_processor - * @enum {string} - */ - type: "midas_depth_image_processor"; /** * A Mult * @description Midas parameter `a_mult` (a = a_mult * PI) @@ -6324,6 +5865,12 @@ export type components = { * @default 0.1 */ bg_th?: number; + /** + * type + * @default midas_depth_image_processor + * @constant + */ + type: "midas_depth_image_processor"; }; /** * MLSD Processor @@ -6340,29 +5887,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mlsd_image_processor - * @enum {string} - */ - type: "mlsd_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6387,11 +5925,16 @@ export type components = { * @default 0.1 */ thr_d?: number; + /** + * type + * @default mlsd_image_processor + * @constant + */ + type: "mlsd_image_processor"; }; /** * ModelError - * @description An enumeration. - * @enum {string} + * @constant */ ModelError: "not_found"; /** ModelInfo */ @@ -6406,7 +5949,7 @@ export type components = { /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; }; /** * ModelLoaderOutput @@ -6429,21 +5972,19 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output - * @enum {string} + * @constant */ type: "model_loader_output"; }; /** * ModelType - * @description An enumeration. * @enum {string} */ ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter"; /** * ModelVariantType - * @description An enumeration. * @enum {string} */ ModelVariantType: "normal" | "inpaint" | "depth"; @@ -6467,18 +6008,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -6492,9 +6033,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default mul - * @enum {string} + * @constant */ type: "mul"; }; @@ -6531,18 +6072,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Seed * @description Seed for random number generation @@ -6567,9 +6108,9 @@ export type components = { */ use_cpu?: boolean; /** - * Type + * type * @default noise - * @enum {string} + * @constant */ type: "noise"; }; @@ -6578,11 +6119,8 @@ export type components = { * @description Invocation noise output */ NoiseOutput: { - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -6594,9 +6132,9 @@ export type components = { */ height: number; /** - * Type + * type * @default noise_output - * @enum {string} + * @constant */ type: "noise_output"; }; @@ -6615,29 +6153,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default normalbae_image_processor - * @enum {string} - */ - type: "normalbae_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6650,6 +6179,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default normalbae_image_processor + * @constant + */ + type: "normalbae_image_processor"; }; /** * ONNX Latents to Image @@ -6666,37 +6201,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Denoised latents tensor - */ + use_cache?: boolean | null; + /** @description Denoised latents tensor */ latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i_onnx - * @enum {string} + * @constant */ type: "l2i_onnx"; }; @@ -6726,19 +6252,13 @@ export type components = { */ vae_encoder?: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output_onnx - * @enum {string} + * @constant */ type: "model_loader_output_onnx"; }; - /** - * ONNX Prompt (Raw) - * @description A node to process inputs and produce outputs. - * May use dependency injection in __init__ to receive providers. - * - * All invocations must use the `@invocation` decorator to provide their unique type. - */ + /** ONNX Prompt (Raw) */ ONNXPromptInvocation: { /** * Id @@ -6750,33 +6270,30 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Raw prompt text (no parsing) * @default */ prompt?: string; - /** - * Clip - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; /** - * Type + * type * @default prompt_onnx - * @enum {string} + * @constant */ type: "prompt_onnx"; }; @@ -6787,19 +6304,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; }; /** ONNXStableDiffusion2ModelConfig */ @@ -6809,19 +6327,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; prediction_type: components["schemas"]["SchedulerPredictionType"]; /** 
Upcast Attention */ @@ -6842,32 +6361,23 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ + /** @description Negative conditioning tensor */ negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description Noise tensor - */ + /** @description Noise tensor */ noise?: components["schemas"]["LatentsField"]; /** * Steps @@ -6895,10 +6405,7 @@ export type components = { * @enum {string} */ precision?: "tensor(bool)" | "tensor(int8)" | "tensor(uint8)" | "tensor(int16)" | "tensor(uint16)" | "tensor(int32)" | "tensor(uint32)" | "tensor(int64)" | "tensor(uint64)" | "tensor(float16)" | "tensor(float)" | "tensor(double)"; - /** - * Unet - * @description UNet (scheduler, LoRAs) - */ + /** @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; /** * Control @@ -6906,17 +6413,13 @@ export type components = { */ control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; /** - * Type + * type * @default t2l_onnx - * @enum {string} + * @constant */ type: "t2l_onnx"; }; - /** - * OffsetPaginatedResults[BoardDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[BoardDTO] */ OffsetPaginatedResults_BoardDTO_: { /** * Limit @@ -6939,11 +6442,7 @@ export type components = { */ items: components["schemas"]["BoardDTO"][]; }; - /** - * OffsetPaginatedResults[ImageDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[ImageDTO] */ OffsetPaginatedResults_ImageDTO_: { /** * Limit @@ -6996,27 +6495,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description ONNX Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description ONNX Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["OnnxModelField"]; /** - * Type + * type * @default onnx_model_loader - * @enum {string} + * @constant */ type: "onnx_model_loader"; }; @@ -7035,29 +6531,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default openpose_image_processor - * @enum {string} - */ - type: "openpose_image_processor"; /** * Hand And Face * @description Whether to use hands and face mode @@ -7076,38 +6563,12 @@ export type components = { * @default 512 */ image_resolution?: number; - }; - /** - * PaginatedResults[GraphExecutionState] - * @description Paginated results - * Generic must be a Pydantic model - */ - PaginatedResults_GraphExecutionState_: { /** - * Page - * @description Current Page + * type + * @default openpose_image_processor + * @constant */ - page: number; - /** - * Pages - * @description Total number of pages - */ - pages: number; - /** - * Per Page - * @description Number of items per page - */ - per_page: number; - /** - * Total - * @description Total number of items in result - */ - total: number; - /** - * Items - * @description Items - */ - items: components["schemas"]["GraphExecutionState"][]; + type: "openpose_image_processor"; }; /** * PIDI Processor @@ -7124,29 +6585,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default pidi_image_processor - * @enum {string} - */ - type: "pidi_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -7171,6 +6623,12 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default pidi_image_processor + * @constant + */ + type: "pidi_image_processor"; }; /** * Prompts from File @@ -7187,18 +6645,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * File Path * @description Path to prompt text file */ file_path: string; /** * Pre Prompt * @description String to prepend to each prompt */ - pre_prompt?: string; + pre_prompt?: string | null; /** * Post Prompt * @description String to append to each prompt */ - post_prompt?: string; + post_prompt?: string | null; /** * Start Line * @description Line in the file to start from * @default 1 */ start_line?: number; /** * Max Prompts * @description Max lines to read from file (0=all) * @default 1 */ max_prompts?: number; /** - * Type + * type * @default prompt_from_file - * @enum {string} + * @constant */ type: "prompt_from_file"; }; @@ -7259,18 +6717,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7290,9 +6748,9 @@ */ decimals?: number; /** - * Type + * type * @default rand_float - * @enum {string} + * @constant */ type: "rand_float"; }; @@ -7311,18 +6769,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value * @default 0 */ low?: number; /** * High * @description The exclusive high value * @default 2147483647 */ high?: number; /** - * Type + * type * @default rand_int - * @enum {string} + * @constant */ type: "rand_int"; }; @@ -7357,18 +6815,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7393,9 +6851,9 @@ */ seed?: number; /** - * Type + * type * @default random_range - * @enum {string} + * @constant */ type: "random_range"; }; @@ -7414,18 +6872,18 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7445,9 +6903,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range - * @enum {string} + * @constant */ type: "range"; }; @@ -7466,18 +6924,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7497,9 +6955,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range_of_size - * @enum {string} + * @constant */ type: "range_of_size"; }; @@ -7526,22 +6984,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Width @@ -7567,9 +7022,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lresize - * @enum {string} + * @constant */ type: "lresize"; }; @@ -7598,18 +7053,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -7623,9 +7078,9 @@ export type components = { */ decimals?: number; /** - * Type + * type * @default round_float - * @enum {string} + * @constant */ type: "round_float"; }; @@ -7644,18 +7099,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7709,9 +7164,9 @@ export type components = { */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_compel_prompt"; }; @@ -7730,18 +7185,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load @@ -7757,21 +7212,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader - * @enum {string} + * @constant */ type: "sdxl_lora_loader"; }; @@ -7784,21 +7239,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader_output - * @enum {string} + * @constant */ type: "sdxl_lora_loader_output"; }; @@ -7817,27 +7272,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_model_loader - * @enum {string} + * @constant */ type: "sdxl_model_loader"; }; @@ -7867,9 +7319,9 @@ */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_model_loader_output"; }; @@ -7888,18 +7340,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7932,15 +7384,12 @@ * @default 6 */ aesthetic_score?: number; - /** - * Clip2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_refiner_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_refiner_compel_prompt"; }; @@ -7959,27 +7408,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Refiner Main Model (UNet, VAE, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Refiner Main Model (UNet, VAE, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_refiner_model_loader - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader"; }; @@ -8004,9 +7450,9 @@ */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_refiner_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader_output"; }; @@ -8025,37 +7471,28 @@ export type components = { * @description Whether or not this is an intermediate invocation.
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; + /** @description The board to save the image to */ + board?: components["schemas"]["BoardField"] | null; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Board - * @description The board to save the image to - */ - board?: components["schemas"]["BoardField"]; - /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default save_image - * @enum {string} + * @constant */ type: "save_image"; }; @@ -8074,22 +7511,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Scale Factor @@ -8110,9 +7544,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lscale - * @enum {string} + * @constant */ type: "lscale"; }; @@ -8131,18 +7565,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Scheduler * @description Scheduler to use during inference @@ -8151,18 +7585,13 @@ export type components = { */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler - * @enum {string} + * @constant */ type: "scheduler"; }; - /** - * SchedulerOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** SchedulerOutput */ SchedulerOutput: { /** * Scheduler @@ -8171,15 +7600,14 @@ export type components = { */ scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler_output - * @enum {string} + * @constant */ type: "scheduler_output"; }; /** * SchedulerPredictionType - * @description An enumeration. * @enum {string} */ SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; @@ -8198,28 +7626,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE model to load */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** * Seamless Y * @description Specify whether Y axis is seamless @@ -8233,9 +7661,9 @@ export type components = { */ seamless_x?: boolean; /** - * Type + * type * @default seamless - * @enum {string} + * @constant */ type: "seamless"; }; @@ -8248,16 +7676,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** - * Type + * type * @default seamless_output - * @enum {string} + * @constant */ type: "seamless_output"; }; @@ -8276,27 +7704,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default segment_anything_processor - * @enum {string} + * @constant */ type: "segment_anything_processor"; }; @@ -8321,10 +7746,7 @@ export type components = { queue: components["schemas"]["SessionQueueStatus"]; processor: components["schemas"]["SessionProcessorStatus"]; }; - /** - * SessionQueueItem - * @description Session queue item without the full graph. Used for serialization. 
- */ + /** SessionQueueItem */ SessionQueueItem: { /** * Item Id @@ -8358,7 +7780,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8373,12 +7795,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8388,17 +7810,11 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; - /** - * Session - * @description The fully-populated session to be executed - */ + field_values?: components["schemas"]["NodeFieldValue"][] | null; + /** @description The fully-populated session to be executed */ session: components["schemas"]["GraphExecutionState"]; }; - /** - * SessionQueueItemDTO - * @description Session queue item without the full graph. Used for serialization. - */ + /** SessionQueueItemDTO */ SessionQueueItemDTO: { /** * Item Id @@ -8432,7 +7848,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8447,12 +7863,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8462,7 +7878,7 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; + field_values?: components["schemas"]["NodeFieldValue"][] | null; }; /** SessionQueueStatus */ SessionQueueStatus: { @@ -8475,17 +7891,17 @@ export type components = { * Item Id * @description The current queue item id */ - item_id?: number; + item_id: number | null; /** * Batch Id * @description The current queue item's batch id */ - batch_id?: string; + batch_id: string | null; /** * Session Id * @description The current queue item's session id */ - session_id?: string; + session_id: string | null; /** * Pending * @description Number of queue items with status 'pending' @@ -8532,27 +7948,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to show - */ + use_cache?: boolean | null; + /** @description The image to show */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default show_image - * @enum {string} + * @constant */ type: "show_image"; }; @@ -8563,21 +7976,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8589,21 +8003,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion2ModelCheckpointConfig */ @@ -8613,21 +8028,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8639,21 +8055,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusionXLModelCheckpointConfig */ @@ -8663,21 +8080,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: 
string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8689,21 +8107,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** @@ -8721,18 +8140,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Easing * @description The easing function to use @@ -8774,12 +8193,12 @@ export type components = { * Pre Start Value * @description value before easing start */ - pre_start_value?: number; + pre_start_value?: number | null; /** * Post End Value * @description value after easing end */ - post_end_value?: number; + post_end_value?: number | null; /** * Mirror * @description include mirror of easing function @@ -8793,9 +8212,9 @@ export type components = { */ show_easing_plot?: boolean; /** - * Type + * type * @default step_param_easing - * @enum {string} + * @constant */ type: "step_param_easing"; }; @@ -8815,9 +8234,9 @@ export type components = { */ string_2: string; /** - * Type + * type * @default string_2_output - * @enum {string} + * @constant */ type: "string_2_output"; }; @@ -8836,27 +8255,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of string values */ collection?: string[]; /** - * Type + * type * @default string_collection - * @enum {string} + * @constant */ type: "string_collection"; }; @@ -8871,9 +8290,9 @@ export type components = { */ collection: string[]; /** - * Type + * type * @default string_collection_output - * @enum {string} + * @constant */ type: "string_collection_output"; }; @@ -8892,18 +8311,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The string value @@ -8911,9 +8330,9 @@ export type components = { */ value?: string; /** - * Type + * type * @default string - * @enum {string} + * @constant */ type: "string"; }; @@ -8932,18 +8351,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -8957,9 +8376,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join - * @enum {string} + * @constant */ type: "string_join"; }; @@ -8978,18 +8397,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -9009,9 +8428,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join_three - * @enum {string} + * @constant */ type: "string_join_three"; }; @@ -9026,9 +8445,9 @@ export type components = { */ value: string; /** - * Type + * type * @default string_output - * @enum {string} + * @constant */ type: "string_output"; }; @@ -9048,9 +8467,9 @@ export type components = { */ negative_string: string; /** - * Type + * type * @default string_pos_neg_output - * @enum {string} + * @constant */ type: "string_pos_neg_output"; }; @@ -9069,18 +8488,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to work on @@ -9106,9 +8525,9 @@ export type components = { */ use_regex?: boolean; /** - * Type + * type * @default string_replace - * @enum {string} + * @constant */ type: "string_replace"; }; @@ -9127,18 +8546,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9152,9 +8571,9 @@ export type components = { */ delimiter?: string; /** - * Type + * type * @default string_split - * @enum {string} + * @constant */ type: "string_split"; }; @@ -9173,18 +8592,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9192,15 +8611,14 @@ export type components = { */ string?: string; /** - * Type + * type * @default string_split_neg - * @enum {string} + * @constant */ type: "string_split_neg"; }; /** * SubModelType - * @description An enumeration. * @enum {string} */ SubModelType: "unet" | "text_encoder" | "text_encoder_2" | "tokenizer" | "tokenizer_2" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; @@ -9219,18 +8637,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -9244,23 +8662,17 @@ export type components = { */ b?: number; /** - * Type + * type * @default sub - * @enum {string} + * @constant */ type: "sub"; }; /** T2IAdapterField */ T2IAdapterField: { - /** - * Image - * @description The T2I-Adapter image prompt. - */ + /** @description The T2I-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * T2I Adapter Model - * @description The T2I-Adapter model to use. - */ + /** @description The T2I-Adapter model to use. */ t2i_adapter_model: components["schemas"]["T2IAdapterModelField"]; /** * Weight @@ -9303,22 +8715,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. 
*/ image?: components["schemas"]["ImageField"]; /** * T2I-Adapter Model @@ -9351,9 +8760,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default t2i_adapter - * @enum {string} + * @constant */ type: "t2i_adapter"; }; @@ -9364,19 +8773,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default t2i_adapter + * @constant */ model_type: "t2i_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** T2IAdapterModelField */ T2IAdapterModelField: { @@ -9388,12 +8798,7 @@ export type components = { /** @description Base model */ base_model: components["schemas"]["BaseModelType"]; }; - /** - * T2IAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** T2IAdapterOutput */ T2IAdapterOutput: { /** * T2I Adapter @@ -9401,9 +8806,9 @@ export type components = { */ t2i_adapter: components["schemas"]["T2IAdapterField"]; /** - * Type + * type * @default t2i_adapter_output - * @enum {string} + * @constant */ type: "t2i_adapter_output"; }; @@ -9414,16 +8819,17 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default embedding + * @constant */ model_type: "embedding"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** Model Format */ model_format: null; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * Tile Resample Processor @@ -9440,47 +8846,38 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default tile_image_processor - * @enum {string} - */ - type: "tile_image_processor"; /** * Down Sampling Rate * @description Down sampling rate * @default 1 */ down_sampling_rate?: number; + /** + * type + * @default tile_image_processor + * @constant + */ + type: "tile_image_processor"; }; /** UNetField */ UNetField: { - /** - * Unet - * @description Info to load unet submodel - */ + /** @description Info to load unet submodel */ unet: components["schemas"]["ModelInfo"]; - /** - * Scheduler - * @description Info to load scheduler submodel - */ + /** @description Info to load scheduler submodel */ scheduler: components["schemas"]["ModelInfo"]; /** * Loras @@ -9521,10 +8918,7 @@ export type components = { }; /** VaeField */ VaeField: { - /** - * Vae - * @description Info to load vae submodel - */ + /** @description Info to load vae submodel */ vae: components["schemas"]["ModelInfo"]; /** * Seamless Axes @@ -9547,27 +8941,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * VAE * @description VAE model to load */ vae_model: components["schemas"]["VAEModelField"]; /** - * Type + * type * @default vae_loader - * @enum {string} + * @constant */ type: "vae_loader"; }; @@ -9582,9 +8976,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default vae_loader_output - * @enum {string} + * @constant */ type: "vae_loader_output"; }; @@ -9595,19 +8989,19 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default vae + * @constant */ model_type: "vae"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["VaeModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * VaeModelFormat - * @description An enumeration. * @enum {string} */ VaeModelFormat: "checkpoint" | "diffusers"; @@ -9635,57 +9029,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default zoe_depth_image_processor - * @enum {string} + * @constant */ type: "zoe_depth_image_processor"; }; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. - */ - UIConfigBase: { - /** - * Tags - * @description The node's tags - */ - tags?: string[]; - /** - * Title - * @description The node's display name - */ - title?: string; - /** - * Category - * @description The node's category - */ - category?: string; - /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". - */ - version?: string; - }; /** * Input * @description The type of input a field accepts. @@ -9695,6 +9059,42 @@ export type components = { * @enum {string} */ Input: "connection" | "direct" | "any"; + /** + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. + * @enum {string} + */ + UIComponent: "none" | "textarea" | "slider"; + /** + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. + */ + UIConfigBase: { + /** + * Tags + * @description The node's tags + */ + tags: string[] | null; + /** + * Title + * @description The node's display name + * @default null + */ + title: string | null; + /** + * Category + * @description The node's category + * @default null + */ + category: string | null; + /** + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + * @default null + */ + version: string | null; + }; /** * UIType * @description Type hints for the UI. @@ -9702,12 +9102,6 @@ export type components = { * @enum {string} */ UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "MetadataField" | "BoardField"; - /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. 
- * @enum {string} - */ - UIComponent: "none" | "textarea" | "slider"; /** * _InputField * @description *DO NOT USE* @@ -9719,16 +9113,16 @@ export type components = { input: components["schemas"]["Input"]; /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; - ui_component?: components["schemas"]["UIComponent"]; + ui_type: components["schemas"]["UIType"] | null; + ui_component: components["schemas"]["UIComponent"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; /** Ui Choice Labels */ - ui_choice_labels?: { + ui_choice_labels: { [key: string]: string; - }; + } | null; /** Item Default */ - item_default?: unknown; + item_default: unknown; }; /** * _OutputField @@ -9740,10 +9134,46 @@ export type components = { _OutputField: { /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; + ui_type: components["schemas"]["UIType"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; }; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * CLIPVisionModelFormat + * @description An enumeration. + * @enum {string} + */ + CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. @@ -9756,42 +9186,6 @@ export type components = { * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * CLIPVisionModelFormat - * @description An enumeration. - * @enum {string} - */ - CLIPVisionModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. 
- * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9806,382 +9200,6 @@ export type external = Record; export type operations = { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - list_sessions: { - parameters: { - query?: { - /** @description The page of results to get */ - page?: number; - /** @description The number of results per page */ - per_page?: number; - /** @description The query string to search for */ - query?: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["PaginatedResults_GraphExecutionState_"]; - }; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - create_session: { - parameters: { - query?: { - /** @description The id of the queue to associate the session with */ - queue_id?: string; - }; - }; - requestBody?: { - content: { - "application/json": components["schemas"]["Graph"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid json */ - 400: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get_session: { - parameters: { - path: { - /** @description The id of the session to get */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - add_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | 
components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": string; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - update_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The path to the node in the graph */ - node_path: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | 
components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | 
components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The path to the node to delete */ - node_path: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - add_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Edge"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The id of the node the edge is coming from */ - from_node_id: string; - /** @description The field of the node the edge is coming from */ - from_field: string; - /** @description The id of the node the edge is going to */ - to_node_id: string; - /** @description The field of 
the node the edge is going to */ - to_field: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - invoke_session: { - parameters: { - query: { - /** @description The id of the queue to associate the session with */ - queue_id: string; - /** @description Whether or not to invoke all remaining invocations */ - all?: boolean; - }; - path: { - /** @description The id of the session to invoke */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is queued */ - 202: { - content: never; - }; - /** @description The session has no invocations ready to invoke */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Cancel Session Invoke - * @deprecated - * @description Invokes a session - */ - cancel_session_invoke: { - parameters: { - path: { - /** @description The id of the session to cancel */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is canceled */ - 202: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Parse Dynamicprompts * @description Creates a batch process @@ -10215,9 +9233,9 @@ export type operations = { parameters: { query?: { /** @description Base models to include */ - base_models?: components["schemas"]["BaseModelType"][]; + base_models?: components["schemas"]["BaseModelType"][] | null; /** @description The type of model to get */ - model_type?: components["schemas"]["ModelType"]; + model_type?: components["schemas"]["ModelType"] | null; }; }; responses: { @@ -10400,7 +9418,7 @@ export type operations = { parameters: { query?: { /** @description Save the converted model to the designated directory */ - convert_dest_directory?: string; + convert_dest_directory?: string | null; }; path: { /** @description Base model */ @@ -10541,11 +9559,11 @@ export type operations = { /** @description Whether this is an intermediate image */ is_intermediate: boolean; /** @description The board to add this image to, if any */ - board_id?: string; + board_id?: string | null; /** @description The session ID associated with this upload, if any */ - session_id?: string; + session_id?: string | null; /** @description Whether to crop the image */ - crop_visible?: boolean; + crop_visible?: boolean | null; }; }; requestBody: { @@ -10664,7 +9682,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": number; }; }; }; @@ -10789,13 +9807,13 @@ export type operations = { parameters: { query?: { /** @description The origin of 
images to list. */ - image_origin?: components["schemas"]["ResourceOrigin"]; + image_origin?: components["schemas"]["ResourceOrigin"] | null; /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][]; + categories?: components["schemas"]["ImageCategory"][] | null; /** @description Whether to list intermediate images. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** @description The board id to filter by. Use 'none' to find images without a board. */ - board_id?: string; + board_id?: string | null; /** @description The page offset */ offset?: number; /** @description The number of images per page */ @@ -10913,11 +9931,11 @@ export type operations = { parameters: { query?: { /** @description Whether to list all boards */ - all?: boolean; + all?: boolean | null; /** @description The page offset */ - offset?: number; + offset?: number | null; /** @description The number of boards per page */ - limit?: number; + limit?: number | null; }; }; responses: { @@ -10995,7 +10013,7 @@ export type operations = { parameters: { query?: { /** @description Permanently delete all images on the board */ - include_images?: boolean; + include_images?: boolean | null; }; path: { /** @description The id of board to delete */ @@ -11311,7 +10329,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": components["schemas"]["EnqueueGraphResult"]; }; }; /** @description Created */ @@ -11348,7 +10366,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": components["schemas"]["EnqueueBatchResult"]; }; }; /** @description Created */ @@ -11375,9 +10393,9 @@ export type operations = { /** @description The number of items to fetch */ limit?: number; /** @description The status of items to fetch */ - status?: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status?: ("pending" | "in_progress" | "completed" | "failed" | "canceled") | null; /** @description The pagination cursor */ - cursor?: number; + cursor?: number | null; /** @description The pagination cursor priority */ priority?: number; }; @@ -11551,7 +10569,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ @@ -11577,7 +10595,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ diff --git a/pyproject.toml b/pyproject.toml index bab87172c2..4c8ec0f5e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ "accelerate~=0.23.0", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel~=2.0.2", "controlnet-aux>=0.0.6", - "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 + "timm==0.6.13", # needed to override timm latest in controlnet_aux, see 
https://github.com/isl-org/ZoeDepth/issues/26 "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 @@ -48,19 +48,20 @@ dependencies = [ "easing-functions", "einops", "facexlib", - "fastapi==0.88.0", - "fastapi-events==0.8.0", + "fastapi~=0.103.2", + "fastapi-events~=0.9.1", "huggingface-hub~=0.16.4", - "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids - "matplotlib", # needed for plotting of Penner easing functions - "mediapipe", # needed for "mediapipeface" controlnet model + "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids + "matplotlib", # needed for plotting of Penner easing functions + "mediapipe", # needed for "mediapipeface" controlnet model "numpy", "npyscreen", "omegaconf", "onnx", "onnxruntime", "opencv-python", - "pydantic==1.*", + "pydantic~=2.4.2", + "pydantic-settings~=2.0.3", "picklescan", "pillow", "prompt-toolkit", @@ -95,33 +96,25 @@ dependencies = [ "mkdocs-git-revision-date-localized-plugin", "mkdocs-redirects==1.2.0", ] -"dev" = [ - "jurigged", - "pudb", -] +"dev" = ["jurigged", "pudb"] "test" = [ "black", "flake8", "Flake8-pyproject", "isort", + "mypy", "pre-commit", "pytest>6.0.0", "pytest-cov", "pytest-datadir", ] "xformers" = [ - "xformers~=0.0.19; sys_platform!='darwin'", - "triton; sys_platform=='linux'", -] -"onnx" = [ - "onnxruntime", -] -"onnx-cuda" = [ - "onnxruntime-gpu", -] -"onnx-directml" = [ - "onnxruntime-directml", + "xformers~=0.0.19; sys_platform!='darwin'", + "triton; sys_platform=='linux'", ] +"onnx" = ["onnxruntime"] +"onnx-cuda" = ["onnxruntime-gpu"] +"onnx-directml" = ["onnxruntime-directml"] [project.scripts] @@ -163,12 +156,15 @@ version = { attr = "invokeai.version.__version__" } [tool.setuptools.packages.find] "where" = ["."] "include" = [ - "invokeai.assets.fonts*","invokeai.version*", - "invokeai.generator*","invokeai.backend*", - "invokeai.frontend*", "invokeai.frontend.web.dist*", - "invokeai.frontend.web.static*", - "invokeai.configs*", - "invokeai.app*", + "invokeai.assets.fonts*", + "invokeai.version*", + "invokeai.generator*", + "invokeai.backend*", + "invokeai.frontend*", + "invokeai.frontend.web.dist*", + "invokeai.frontend.web.static*", + "invokeai.configs*", + "invokeai.app*", ] [tool.setuptools.package-data] @@ -182,7 +178,7 @@ version = { attr = "invokeai.version.__version__" } [tool.pytest.ini_options] addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\"" markers = [ - "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\"." + "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\".", ] [tool.coverage.run] branch = true @@ -190,7 +186,7 @@ source = ["invokeai"] omit = ["*tests*", "*migrations*", ".venv/*", "*.env"] [tool.coverage.report] show_missing = true -fail_under = 85 # let's set something sensible on Day 1 ... +fail_under = 85 # let's set something sensible on Day 1 ... 
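The dependency changes above are the crux of this migration: `pydantic==1.*` becomes `pydantic~=2.4.2`, `pydantic-settings~=2.0.3` is added because `BaseSettings` moved out of the core package in Pydantic v2, and FastAPI is bumped to `~=0.103.2` since FastAPI only gained Pydantic v2 support in 0.100.0 (`fastapi-events` moves in lockstep). The new `mypy` entry under the `test` extra pairs with the `[tool.mypy]` configuration added further down. Below is a minimal sketch of the settings-class change, assuming pydantic-settings 2.x; InvokeAI's real settings class is `InvokeAIAppConfig` (used in `tests/nodes/test_sqlite.py` below), while `AppSettings` here is a hypothetical stand-in:

```python
# Hypothetical example of the Pydantic v2 settings pattern; AppSettings and
# its fields are illustrative only, not InvokeAI code.
from pydantic_settings import BaseSettings, SettingsConfigDict  # v1: `from pydantic import BaseSettings`


class AppSettings(BaseSettings):
    # v2 replaces the v1 inner `class Config:` with a model_config dict
    model_config = SettingsConfigDict(env_prefix="INVOKEAI_")

    host: str = "127.0.0.1"
    port: int = 9090


settings = AppSettings()  # reads INVOKEAI_HOST / INVOKEAI_PORT from the environment
```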
[tool.coverage.json] output = "coverage/coverage.json" pretty_print = true @@ -209,7 +205,7 @@ exclude = [ "__pycache__", "build", "dist", - "invokeai/frontend/web/node_modules/" + "invokeai/frontend/web/node_modules/", ] [tool.black] @@ -218,3 +214,53 @@ line-length = 120 [tool.isort] profile = "black" line_length = 120 + +[tool.mypy] +ignore_missing_imports = true # ignores missing types in third-party libraries + +[[tool.mypy.overrides]] +follow_imports = "skip" +module = [ + "invokeai.app.api.routers.models", + "invokeai.app.invocations.compel", + "invokeai.app.invocations.latent", + "invokeai.app.services.config.config_base", + "invokeai.app.services.config.config_default", + "invokeai.app.services.invocation_stats.invocation_stats_default", + "invokeai.app.services.model_manager.model_manager_base", + "invokeai.app.services.model_manager.model_manager_default", + "invokeai.app.util.controlnet_utils", + "invokeai.backend.image_util.txt2mask", + "invokeai.backend.image_util.safety_checker", + "invokeai.backend.image_util.patchmatch", + "invokeai.backend.image_util.invisible_watermark", + "invokeai.backend.install.model_install_backend", + "invokeai.backend.ip_adapter.ip_adapter", + "invokeai.backend.ip_adapter.resampler", + "invokeai.backend.ip_adapter.unet_patcher", + "invokeai.backend.model_management.convert_ckpt_to_diffusers", + "invokeai.backend.model_management.lora", + "invokeai.backend.model_management.model_cache", + "invokeai.backend.model_management.model_manager", + "invokeai.backend.model_management.model_merge", + "invokeai.backend.model_management.model_probe", + "invokeai.backend.model_management.model_search", + "invokeai.backend.model_management.models.*", # this is needed to ignore the module's `__init__.py` + "invokeai.backend.model_management.models.base", + "invokeai.backend.model_management.models.controlnet", + "invokeai.backend.model_management.models.ip_adapter", + "invokeai.backend.model_management.models.lora", + "invokeai.backend.model_management.models.sdxl", + "invokeai.backend.model_management.models.stable_diffusion", + "invokeai.backend.model_management.models.vae", + "invokeai.backend.model_management.seamless", + "invokeai.backend.model_management.util", + "invokeai.backend.stable_diffusion.diffusers_pipeline", + "invokeai.backend.stable_diffusion.diffusion.cross_attention_control", + "invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion", + "invokeai.backend.util.hotfixes", + "invokeai.backend.util.logging", + "invokeai.backend.util.mps_fixes", + "invokeai.backend.util.util", + "invokeai.frontend.install.model_install", +] diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index 822ffc1588..3c965895f9 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -1,4 +1,5 @@ import pytest +from pydantic import TypeAdapter from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -593,20 +594,21 @@ def test_graph_can_serialize(): g.add_edge(e) # Not throwing on this line is sufficient - _ = g.json() + _ = g.model_dump_json() def test_graph_can_deserialize(): g = Graph() n1 = TextToImageTestInvocation(id="1", prompt="Banana sushi") - n2 = ESRGANInvocation(id="2") + n2 = ImageToImageTestInvocation(id="2") g.add_node(n1) g.add_node(n2) e = create_edge(n1.id, "image", n2.id, "image") g.add_edge(e) - json = g.json() - g2 = Graph.parse_raw(json) + json = g.model_dump_json() + adapter_graph = TypeAdapter(Graph) + g2 = adapter_graph.validate_json(json) assert g2 is not None 
assert g2.nodes["1"] is not None @@ -619,7 +621,7 @@ def test_graph_can_deserialize(): def test_invocation_decorator(): - invocation_type = "test_invocation" + invocation_type = "test_invocation_decorator" title = "Test Invocation" tags = ["first", "second", "third"] category = "category" @@ -630,7 +632,7 @@ def test_invocation_decorator(): def invoke(self): pass - schema = TestInvocation.schema() + schema = TestInvocation.model_json_schema() assert schema.get("title") == title assert schema.get("tags") == tags @@ -640,18 +642,17 @@ def test_invocation_decorator(): def test_invocation_version_must_be_semver(): - invocation_type = "test_invocation" valid_version = "1.0.0" invalid_version = "not_semver" - @invocation(invocation_type, version=valid_version) + @invocation("test_invocation_version_valid", version=valid_version) class ValidVersionInvocation(BaseInvocation): def invoke(self): pass with pytest.raises(InvalidVersionError): - @invocation(invocation_type, version=invalid_version) + @invocation("test_invocation_version_invalid", version=invalid_version) class InvalidVersionInvocation(BaseInvocation): def invoke(self): pass @@ -694,4 +695,4 @@ def test_ints_do_not_accept_floats(): def test_graph_can_generate_schema(): # Not throwing on this line is sufficient # NOTE: if this test fails, it's PROBABLY because a new invocation type is breaking schema generation - _ = Graph.schema_json(indent=2) + _ = Graph.model_json_schema() diff --git a/tests/nodes/test_session_queue.py b/tests/nodes/test_session_queue.py index 6dd7c4845a..731316068c 100644 --- a/tests/nodes/test_session_queue.py +++ b/tests/nodes/test_session_queue.py @@ -1,5 +1,5 @@ import pytest -from pydantic import ValidationError, parse_raw_as +from pydantic import TypeAdapter, ValidationError from invokeai.app.services.session_queue.session_queue_common import ( Batch, @@ -150,8 +150,9 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): values = prepare_values_to_insert(queue_id="default", batch=b, priority=0, max_new_queue_items=1000) assert len(values) == 8 + session_adapter = TypeAdapter(GraphExecutionState) # graph should be serialized - ges = parse_raw_as(GraphExecutionState, values[0].session) + ges = session_adapter.validate_json(values[0].session) # graph values should be populated assert ges.graph.get_node("1").prompt == "Banana sushi" @@ -160,15 +161,16 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): assert ges.graph.get_node("4").prompt == "Nissan" # session ids should match deserialized graph - assert [v.session_id for v in values] == [parse_raw_as(GraphExecutionState, v.session).id for v in values] + assert [v.session_id for v in values] == [session_adapter.validate_json(v.session).id for v in values] # should unique session ids sids = [v.session_id for v in values] assert len(sids) == len(set(sids)) + nfv_list_adapter = TypeAdapter(list[NodeFieldValue]) # should have 3 node field values assert type(values[0].field_values) is str - assert len(parse_raw_as(list[NodeFieldValue], values[0].field_values)) == 3 + assert len(nfv_list_adapter.validate_json(values[0].field_values)) == 3 # should have batch id and priority assert all(v.batch_id == b.batch_id for v in values) diff --git a/tests/nodes/test_sqlite.py b/tests/nodes/test_sqlite.py index 6e4da8b36e..818f9d048f 100644 --- a/tests/nodes/test_sqlite.py +++ b/tests/nodes/test_sqlite.py @@ -15,7 +15,8 @@ class TestModel(BaseModel): @pytest.fixture def db() -> SqliteItemStorage[TestModel]: sqlite_db = 
SqliteDatabase(InvokeAIAppConfig(use_memory_db=True), InvokeAILogger.get_logger()) - return SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + sqlite_item_storage = SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + return sqlite_item_storage def test_sqlite_service_can_create_and_get(db: SqliteItemStorage[TestModel]):
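Taken together, the test updates above catalogue the core Pydantic v1 → v2 renames this diff applies throughout the codebase: `.dict()` / `.json()` become `model_dump()` / `model_dump_json()`, `.schema()` / `.schema_json()` become `model_json_schema()`, and the `parse_obj_as` / `parse_raw_as` helpers give way to reusable `TypeAdapter` instances. The same migration explains the churn in the generated TypeScript client earlier in the diff: v2 emits `anyOf [X, null]` for `Optional[X] = None` fields (hence the pervasive `| null`), and a JSON Schema `const` for single-value `Literal` discriminators (hence `@constant` replacing the one-member `@enum`s). A minimal self-contained sketch of the renames, assuming Pydantic 2.x; `Point` is a toy model, not an InvokeAI type:

```python
from typing import Optional

from pydantic import BaseModel, TypeAdapter


class Point(BaseModel):
    x: int
    y: int
    label: Optional[str] = None  # v2 schema: anyOf [string, null] -> `string | null` in TS


p = Point(x=1, y=2)

raw = p.model_dump_json()           # v1: p.json()
data = p.model_dump()               # v1: p.dict()
schema = Point.model_json_schema()  # v1: Point.schema()

# v1: parse_raw_as(Point, raw) and parse_obj_as(Point, data)
adapter = TypeAdapter(Point)
assert adapter.validate_json(raw) == p
assert adapter.validate_python(data) == p

# TypeAdapter also wraps non-model types, mirroring the list[NodeFieldValue]
# adapter built in tests/nodes/test_session_queue.py above
points = TypeAdapter(list[Point]).validate_json('[{"x": 0, "y": 0}]')
assert points == [Point(x=0, y=0)]
```

Building an adapter once and reusing it, as `session_adapter` does in the session-queue test above, avoids recompiling the validation core on every call.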