From 19c5435332d219f0fa6b89fdb334e37cb585b1f8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:11:46 +1100 Subject: [PATCH 01/24] fix(ui): copy image via img onload to blob There's a bug in chrome that screws with headers on fetch requests and 307 responses. This causes images to fail to copy in the commercial environment. This change attempts to get around this by copying images in a different way (similar to how the canvas works). When the user requests a copy we: - create an `<img>` element - set `crossOrigin` if needed - add an onload handler: - create a canvas element - draw image onto it - export canvas to blob This is wrapped in a promise which resolves to the blob, which can then be copied to clipboard. --- A customized version of Konva's `useImage` hook is also included, which returns the image blob in addition to the `<img>` element. Unfortunately, this hook is not suitable for use across the app, because it does all the image fetching up front, regardless of whether we actually want to copy the image. In other words, we'd have to fetch the whole image file even if the user is just skipping through image metadata, in order to have the blob to copy. The callback approach means we only fetch the image when the user clicks copy. The hook is thus currently unused. 
--- .../frontend/web/src/common/hooks/useImage.ts | 102 ++++++++++++++++++ .../web/src/common/hooks/useImageUrlToBlob.ts | 40 +++++++ .../system/util/copyBlobToClipboard.ts | 2 +- .../ui/hooks/useCopyImageToClipboard.ts | 20 ++-- 4 files changed, 153 insertions(+), 11 deletions(-) create mode 100644 invokeai/frontend/web/src/common/hooks/useImage.ts create mode 100644 invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts diff --git a/invokeai/frontend/web/src/common/hooks/useImage.ts b/invokeai/frontend/web/src/common/hooks/useImage.ts new file mode 100644 index 0000000000..60c973ce59 --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useImage.ts @@ -0,0 +1,102 @@ +import { useLayoutEffect, useRef, useState } from 'react'; + +// Adapted from https://github.com/konvajs/use-image + +type CrossOrigin = 'anonymous' | 'use-credentials'; +type ReferrerPolicy = + | 'no-referrer' + | 'no-referrer-when-downgrade' + | 'origin' + | 'origin-when-cross-origin' + | 'same-origin' + | 'strict-origin' + | 'strict-origin-when-cross-origin' + | 'unsafe-url'; +type ImageStatus = 'loaded' | 'loading' | 'failed'; + +export const useImage = ( + url: string, + crossOrigin?: CrossOrigin, + referrerpolicy?: ReferrerPolicy +): [undefined | HTMLImageElement, ImageStatus, Blob | null] => { + // lets use refs for image and status + // so we can update them during render + // to have instant update in status/image when new data comes in + const statusRef = useRef('loading'); + const imageRef = useRef(); + const blobRef = useRef(null); + + // we are not going to use token + // but we need to just to trigger state update + const [_, setStateToken] = useState(0); + + // keep track of old props to trigger changes + const oldUrl = useRef(); + const oldCrossOrigin = useRef(); + const oldReferrerPolicy = useRef(); + + if ( + oldUrl.current !== url || + oldCrossOrigin.current !== crossOrigin || + oldReferrerPolicy.current !== referrerpolicy + ) { + statusRef.current = 'loading'; + 
imageRef.current = undefined; + oldUrl.current = url; + oldCrossOrigin.current = crossOrigin; + oldReferrerPolicy.current = referrerpolicy; + } + + useLayoutEffect( + function () { + if (!url) { + return; + } + const img = document.createElement('img'); + + function onload() { + statusRef.current = 'loaded'; + imageRef.current = img; + const canvas = document.createElement('canvas'); + canvas.width = img.clientWidth; + canvas.height = img.clientHeight; + + const context = canvas.getContext('2d'); + if (context) { + context.drawImage(img, 0, 0); + canvas.toBlob(function (blob) { + blobRef.current = blob; + }, 'image/png'); + } + setStateToken(Math.random()); + } + + function onerror() { + statusRef.current = 'failed'; + imageRef.current = undefined; + setStateToken(Math.random()); + } + + img.addEventListener('load', onload); + img.addEventListener('error', onerror); + if (crossOrigin) { + img.crossOrigin = crossOrigin; + } + if (referrerpolicy) { + img.referrerPolicy = referrerpolicy; + } + img.src = url; + + return function cleanup() { + img.removeEventListener('load', onload); + img.removeEventListener('error', onerror); + }; + }, + [url, crossOrigin, referrerpolicy] + ); + + // return array because it is better to use in case of several useImage hooks + // const [background, backgroundStatus] = useImage(url1); + // const [patter] = useImage(url2); + return [imageRef.current, statusRef.current, blobRef.current]; +}; diff --git a/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts b/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts new file mode 100644 index 0000000000..77538a929d --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useImageUrlToBlob.ts @@ -0,0 +1,40 @@ +import { useCallback } from 'react'; +import { $authToken } from 'services/api/client'; + +/** + * Converts an image URL to a Blob by creating an element, drawing it to canvas + * and then converting the canvas to a Blob. 
+ * + * @returns A function that takes a URL and returns a Promise that resolves with a Blob + */ +export const useImageUrlToBlob = () => { + const imageUrlToBlob = useCallback( + async (url: string) => + new Promise((resolve) => { + const img = new Image(); + img.onload = () => { + const canvas = document.createElement('canvas'); + canvas.width = img.width; + canvas.height = img.height; + + const context = canvas.getContext('2d'); + if (!context) { + return; + } + context.drawImage(img, 0, 0); + resolve( + new Promise((resolve) => { + canvas.toBlob(function (blob) { + resolve(blob); + }, 'image/png'); + }) + ); + }; + img.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous'; + img.src = url; + }), + [] + ); + + return imageUrlToBlob; +}; diff --git a/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts b/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts index cf59f2a687..b5e896f3bf 100644 --- a/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts +++ b/invokeai/frontend/web/src/features/system/util/copyBlobToClipboard.ts @@ -2,7 +2,7 @@ * Copies a blob to the clipboard by calling navigator.clipboard.write(). 
*/ export const copyBlobToClipboard = ( - blob: Promise, + blob: Promise | Blob, type = 'image/png' ) => { navigator.clipboard.write([ diff --git a/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts b/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts index 4b42a45e93..ef9db44a9d 100644 --- a/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts +++ b/invokeai/frontend/web/src/features/ui/hooks/useCopyImageToClipboard.ts @@ -1,11 +1,13 @@ import { useAppToaster } from 'app/components/Toaster'; +import { useImageUrlToBlob } from 'common/hooks/useImageUrlToBlob'; +import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard'; import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard'; export const useCopyImageToClipboard = () => { const toaster = useAppToaster(); const { t } = useTranslation(); + const imageUrlToBlob = useImageUrlToBlob(); const isClipboardAPIAvailable = useMemo(() => { return Boolean(navigator.clipboard) && Boolean(window.ClipboardItem); @@ -23,15 +25,13 @@ export const useCopyImageToClipboard = () => { }); } try { - const getImageBlob = async () => { - const response = await fetch(image_url); - if (!response.ok) { - throw new Error(`Problem retrieving image data`); - } - return await response.blob(); - }; + const blob = await imageUrlToBlob(image_url); - copyBlobToClipboard(getImageBlob()); + if (!blob) { + throw new Error('Unable to create Blob'); + } + + copyBlobToClipboard(blob); toaster({ title: t('toast.imageCopied'), @@ -49,7 +49,7 @@ export const useCopyImageToClipboard = () => { }); } }, - [isClipboardAPIAvailable, t, toaster] + [imageUrlToBlob, isClipboardAPIAvailable, t, toaster] ); return { isClipboardAPIAvailable, copyImageToClipboard }; From c238a7f18b47bbdad9bc0489ef250786bce94424 Mon Sep 17 00:00:00 2001 From: psychedelicious 
<4822129+psychedelicious@users.noreply.github.com> Date: Sun, 24 Sep 2023 18:11:07 +1000 Subject: [PATCH 02/24] feat(api): chore: pydantic & fastapi upgrade Upgrade pydantic and fastapi to latest. - pydantic~=2.4.2 - fastapi~=0.103.2 - fastapi-events~=0.9.1 **Big Changes** There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes. **Invocations** The biggest change relates to invocation creation, instantiation and validation. Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie. Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`. With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes and is not a free operation. This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method. In the end, this implementation is cleaner. **Invocation Fields** In pydantic v2, you can no longer directly add or remove fields from a model. Previously, we did this to add the `type` field to invocations. **Invocation Decorators** With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper. A similar technique is used for `invocation_output()`. **Minor Changes** There are a number of minor changes around the pydantic v2 models API. 
**Protected `model_` Namespace** All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_". Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple. ```py class IPAdapterModelField(BaseModel): model_name: str = Field(description="Name of the IP-Adapter model") base_model: BaseModelType = Field(description="Base model") model_config = ConfigDict(protected_namespaces=()) ``` **Model Serialization** Pydantic models no longer have `Model.dict()` or `Model.json()`. Instead, we use `Model.model_dump()` or `Model.model_dump_json()`. **Model Deserialization** Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions. Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model. ```py adapter_graph = TypeAdapter(Graph) deserialized_graph_from_json = adapter_graph.validate_json(graph_json) deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict) ``` **Field Customisation** Pydantic `Field`s no longer accept arbitrary args. Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field. **Schema Customisation** FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec. This necessitates two changes: - Our schema customization logic has been revised - Schema parsing to build node templates has been revised The specifics aren't important, but this does present additional surface area for bugs. **Performance Improvements** Pydantic v2 is a full rewrite with a rust backend. 
This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very very often - a couple times per node. I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very larges graphs - like with massive iterators - are much, much faster. --- invokeai/app/api/routers/images.py | 2 +- invokeai/app/api/routers/models.py | 109 +- invokeai/app/api/routers/utilities.py | 3 +- invokeai/app/api_app.py | 53 +- invokeai/app/cli/commands.py | 11 +- invokeai/app/invocations/baseinvocation.py | 553 +-- invokeai/app/invocations/collections.py | 8 +- invokeai/app/invocations/compel.py | 70 +- .../controlnet_image_processors.py | 24 +- invokeai/app/invocations/facetools.py | 4 +- invokeai/app/invocations/image.py | 154 +- invokeai/app/invocations/ip_adapter.py | 6 +- invokeai/app/invocations/latent.py | 149 +- invokeai/app/invocations/math.py | 15 +- invokeai/app/invocations/metadata.py | 2 +- invokeai/app/invocations/model.py | 78 +- invokeai/app/invocations/noise.py | 14 +- invokeai/app/invocations/onnx.py | 30 +- invokeai/app/invocations/param_easing.py | 31 +- invokeai/app/invocations/prompt.py | 31 +- invokeai/app/invocations/t2i_adapter.py | 4 +- invokeai/app/invocations/upscale.py | 3 + .../board_records/board_records_common.py | 12 +- invokeai/app/services/boards/boards_common.py | 2 +- invokeai/app/services/config/config_base.py | 47 +- .../app/services/config/config_default.py | 138 +- invokeai/app/services/events/events_base.py | 8 +- .../services/image_files/image_files_base.py | 3 +- .../image_records/image_records_base.py | 14 +- .../image_records/image_records_common.py | 8 +- .../image_records/image_records_sqlite.py | 20 +- invokeai/app/services/images/images_base.py | 2 +- invokeai/app/services/images/images_common.py | 6 +- .../app/services/images/images_default.py | 6 +- 
.../invocation_cache_memory.py | 9 +- .../invocation_processor_default.py | 10 +- .../invocation_stats_default.py | 2 +- .../item_storage/item_storage_sqlite.py | 17 +- .../model_manager/model_manager_base.py | 2 +- .../model_manager/model_manager_default.py | 2 +- .../session_queue/session_queue_common.py | 74 +- .../session_queue/session_queue_sqlite.py | 16 +- .../app/services/shared/default_graphs.py | 6 +- invokeai/app/services/shared/graph.py | 70 +- invokeai/app/services/shared/pagination.py | 7 +- invokeai/app/util/controlnet_utils.py | 2 +- invokeai/app/util/misc.py | 6 + invokeai/app/util/model_exclude_null.py | 4 +- invokeai/assets/__init__.py | 0 invokeai/backend/image_util/txt2mask.py | 14 +- invokeai/backend/image_util/util.py | 2 +- .../backend/install/invokeai_configure.py | 6 +- .../backend/model_management/model_manager.py | 16 +- .../model_management/models/__init__.py | 16 +- .../backend/model_management/models/base.py | 13 +- .../model_management/models/ip_adapter.py | 6 +- invokeai/backend/model_management/seamless.py | 8 +- .../diffusion/cross_attention_map_saving.py | 12 +- .../listeners/imageDeleted.ts | 4 +- .../components/IAIMantineMultiSelect.tsx | 2 +- .../components/IAIMantineSearchableSelect.tsx | 2 +- .../common/components/IAIMantineSelect.tsx | 2 +- .../store/dynamicPromptsSlice.ts | 5 +- .../web/src/features/nodes/types/types.ts | 64 +- .../nodes/util/fieldTemplateBuilders.ts | 232 +- .../src/features/nodes/util/parseSchema.ts | 13 +- .../queue/components/common/QueueItemCard.tsx | 2 +- .../subpanels/MergeModelsPanel.tsx | 4 +- .../web/src/services/api/endpoints/images.ts | 4 +- .../frontend/web/src/services/api/schema.d.ts | 3486 ++++++----------- pyproject.toml | 110 +- tests/nodes/test_node_graph.py | 21 +- tests/nodes/test_session_queue.py | 10 +- tests/nodes/test_sqlite.py | 3 +- 74 files changed, 2788 insertions(+), 3116 deletions(-) create mode 100644 invokeai/assets/__init__.py diff --git 
a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 7b61887eb8..43a72943ee 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -42,7 +42,7 @@ async def upload_image( crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"), ) -> ImageDTO: """Uploads an image""" - if not file.content_type.startswith("image"): + if not file.content_type or not file.content_type.startswith("image"): raise HTTPException(status_code=415, detail="Not an image") contents = await file.read() diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index a7b1f81252..018f3af02b 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -2,11 +2,11 @@ import pathlib -from typing import List, Literal, Optional, Union +from typing import Annotated, List, Literal, Optional, Union from fastapi import Body, Path, Query, Response from fastapi.routing import APIRouter -from pydantic import BaseModel, parse_obj_as +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter from starlette.exceptions import HTTPException from invokeai.backend import BaseModelType, ModelType @@ -23,8 +23,14 @@ from ..dependencies import ApiDependencies models_router = APIRouter(prefix="/v1/models", tags=["models"]) UpdateModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +update_models_response_adapter = TypeAdapter(UpdateModelResponse) + ImportModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +import_models_response_adapter = TypeAdapter(ImportModelResponse) + ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] +convert_models_response_adapter = TypeAdapter(ConvertModelResponse) + MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] @@ -32,6 +38,11 @@ ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] class ModelsList(BaseModel): models: 
list[Union[tuple(OPENAPI_MODEL_CONFIGS)]] + model_config = ConfigDict(use_enum_values=True) + + +models_list_adapter = TypeAdapter(ModelsList) + @models_router.get( "/", @@ -49,7 +60,7 @@ async def list_models( models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) else: models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type) - models = parse_obj_as(ModelsList, {"models": models_raw}) + models = models_list_adapter.validate_python({"models": models_raw}) return models @@ -105,11 +116,14 @@ async def update_model( info.path = new_info.get("path") # replace empty string values with None/null to avoid phenomenon of vae: '' - info_dict = info.dict() + info_dict = info.model_dump() info_dict = {x: info_dict[x] if info_dict[x] else None for x in info_dict.keys()} ApiDependencies.invoker.services.model_manager.update_model( - model_name=model_name, base_model=base_model, model_type=model_type, model_attributes=info_dict + model_name=model_name, + base_model=base_model, + model_type=model_type, + model_attributes=info_dict, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -117,7 +131,7 @@ async def update_model( base_model=base_model, model_type=model_type, ) - model_response = parse_obj_as(UpdateModelResponse, model_raw) + model_response = update_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=str(e)) except ValueError as e: @@ -159,7 +173,8 @@ async def import_model( try: installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( - items_to_import=items_to_import, prediction_type_helper=lambda x: prediction_types.get(prediction_type) + items_to_import=items_to_import, + prediction_type_helper=lambda x: prediction_types.get(prediction_type), ) info = installed_models.get(location) @@ -171,7 +186,7 @@ async def import_model( model_raw = 
ApiDependencies.invoker.services.model_manager.list_model( model_name=info.name, base_model=info.base_model, model_type=info.model_type ) - return parse_obj_as(ImportModelResponse, model_raw) + return import_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) @@ -205,13 +220,18 @@ async def add_model( try: ApiDependencies.invoker.services.model_manager.add_model( - info.model_name, info.base_model, info.model_type, model_attributes=info.dict() + info.model_name, + info.base_model, + info.model_type, + model_attributes=info.model_dump(), ) logger.info(f"Successfully added {info.model_name}") model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name=info.model_name, base_model=info.base_model, model_type=info.model_type + model_name=info.model_name, + base_model=info.base_model, + model_type=info.model_type, ) - return parse_obj_as(ImportModelResponse, model_raw) + return import_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: logger.error(str(e)) raise HTTPException(status_code=404, detail=str(e)) @@ -223,7 +243,10 @@ async def add_model( @models_router.delete( "/{base_model}/{model_type}/{model_name}", operation_id="del_model", - responses={204: {"description": "Model deleted successfully"}, 404: {"description": "Model not found"}}, + responses={ + 204: {"description": "Model deleted successfully"}, + 404: {"description": "Model not found"}, + }, status_code=204, response_model=None, ) @@ -279,7 +302,7 @@ async def convert_model( model_raw = ApiDependencies.invoker.services.model_manager.list_model( model_name, base_model=base_model, model_type=model_type ) - response = parse_obj_as(ConvertModelResponse, model_raw) + response = convert_models_response_adapter.validate_python(model_raw) except ModelNotFoundException as e: raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}") except ValueError as e: @@ -302,7 
+325,8 @@ async def search_for_models( ) -> List[pathlib.Path]: if not search_path.is_dir(): raise HTTPException( - status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory" + status_code=404, + detail=f"The search path '{search_path}' does not exist or is not directory", ) return ApiDependencies.invoker.services.model_manager.search_for_models(search_path) @@ -337,6 +361,26 @@ async def sync_to_config() -> bool: return True +# There's some weird pydantic-fastapi behaviour that requires this to be a separate class +# TODO: After a few updates, see if it works inside the route operation handler? +class MergeModelsBody(BaseModel): + model_names: List[str] = Field(description="model name", min_length=2, max_length=3) + merged_model_name: Optional[str] = Field(description="Name of destination model") + alpha: Optional[float] = Field(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5) + interp: Optional[MergeInterpolationMethod] = Field(description="Interpolation method") + force: Optional[bool] = Field( + description="Force merging of models created with different versions of diffusers", + default=False, + ) + + merge_dest_directory: Optional[str] = Field( + description="Save the merged model to the designated directory (with 'merged_model_name' appended)", + default=None, + ) + + model_config = ConfigDict(protected_namespaces=()) + + @models_router.put( "/merge/{base_model}", operation_id="merge_models", @@ -349,31 +393,23 @@ async def sync_to_config() -> bool: response_model=MergeModelResponse, ) async def merge_models( + body: Annotated[MergeModelsBody, Body(description="Model configuration", embed=True)], base_model: BaseModelType = Path(description="Base model"), - model_names: List[str] = Body(description="model name", min_items=2, max_items=3), - merged_model_name: Optional[str] = Body(description="Name of destination model"), - alpha: Optional[float] = Body(description="Alpha weighting strength 
to apply to 2d and 3d models", default=0.5), - interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method"), - force: Optional[bool] = Body( - description="Force merging of models created with different versions of diffusers", default=False - ), - merge_dest_directory: Optional[str] = Body( - description="Save the merged model to the designated directory (with 'merged_model_name' appended)", - default=None, - ), ) -> MergeModelResponse: """Convert a checkpoint model into a diffusers model""" logger = ApiDependencies.invoker.services.logger try: - logger.info(f"Merging models: {model_names} into {merge_dest_directory or ''}/{merged_model_name}") - dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None + logger.info( + f"Merging models: {body.model_names} into {body.merge_dest_directory or ''}/{body.merged_model_name}" + ) + dest = pathlib.Path(body.merge_dest_directory) if body.merge_dest_directory else None result = ApiDependencies.invoker.services.model_manager.merge_models( - model_names, - base_model, - merged_model_name=merged_model_name or "+".join(model_names), - alpha=alpha, - interp=interp, - force=force, + model_names=body.model_names, + base_model=base_model, + merged_model_name=body.merged_model_name or "+".join(body.model_names), + alpha=body.alpha, + interp=body.interp, + force=body.force, merge_dest_directory=dest, ) model_raw = ApiDependencies.invoker.services.model_manager.list_model( @@ -381,9 +417,12 @@ async def merge_models( base_model=base_model, model_type=ModelType.Main, ) - response = parse_obj_as(ConvertModelResponse, model_raw) + response = convert_models_response_adapter.validate_python(model_raw) except ModelNotFoundException: - raise HTTPException(status_code=404, detail=f"One or more of the models '{model_names}' not found") + raise HTTPException( + status_code=404, + detail=f"One or more of the models '{body.model_names}' not found", + ) except ValueError as e: raise 
HTTPException(status_code=400, detail=str(e)) return response diff --git a/invokeai/app/api/routers/utilities.py b/invokeai/app/api/routers/utilities.py index e664cb9070..476d10e2c0 100644 --- a/invokeai/app/api/routers/utilities.py +++ b/invokeai/app/api/routers/utilities.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator from fastapi import Body @@ -27,6 +27,7 @@ async def parse_dynamicprompts( combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"), ) -> DynamicPromptsResponse: """Creates a batch process""" + generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator] try: error: Optional[str] = None if combinatorial: diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fdbd64b30d..5bbd8150c1 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -22,7 +22,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from fastapi.staticfiles import StaticFiles from fastapi_events.handlers.local import local_handler from fastapi_events.middleware import EventHandlerASGIMiddleware - from pydantic.schema import schema + from pydantic.json_schema import models_json_schema # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities + from .api.routers import app_info, board_images, boards, images, models, session_queue, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -51,7 +51,7 @@ 
mimetypes.add_type("text/css", ".css") # Create the app # TODO: create this all in a method so configuration/etc. can be passed in? -app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None) +app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False) # Add event handler event_handler_id: int = id(app) @@ -63,18 +63,18 @@ app.add_middleware( socket_io = SocketIO(app) +app.add_middleware( + CORSMiddleware, + allow_origins=app_config.allow_origins, + allow_credentials=app_config.allow_credentials, + allow_methods=app_config.allow_methods, + allow_headers=app_config.allow_headers, +) + # Add startup event to load dependencies @app.on_event("startup") async def startup_event(): - app.add_middleware( - CORSMiddleware, - allow_origins=app_config.allow_origins, - allow_credentials=app_config.allow_credentials, - allow_methods=app_config.allow_methods, - allow_headers=app_config.allow_headers, - ) - ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) @@ -85,12 +85,7 @@ async def shutdown_event(): # Include all routers -# TODO: REMOVE -# app.include_router( -# invocation.invocation_router, -# prefix = '/api') - -app.include_router(sessions.session_router, prefix="/api") +# app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") @@ -117,6 +112,7 @@ def custom_openapi(): description="An API for invoking AI image operations", version="1.0.0", routes=app.routes, + separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/ ) # Add all outputs @@ -127,29 +123,32 @@ def custom_openapi(): output_type = signature(invoker.invoke).return_annotation output_types.add(output_type) - output_schemas = schema(output_types, ref_prefix="#/components/schemas/") - for schema_key, output_schema in output_schemas["definitions"].items(): - output_schema["class"] = "output" - 
openapi_schema["components"]["schemas"][schema_key] = output_schema - + output_schemas = models_json_schema( + models=[(o, "serialization") for o in output_types], ref_template="#/components/schemas/{model}" + ) + for schema_key, output_schema in output_schemas[1]["$defs"].items(): # TODO: note that we assume the schema_key here is the TYPE.__name__ # This could break in some cases, figure out a better way to do it output_type_titles[schema_key] = output_schema["title"] # Add Node Editor UI helper schemas - ui_config_schemas = schema([UIConfigBase, _InputField, _OutputField], ref_prefix="#/components/schemas/") - for schema_key, ui_config_schema in ui_config_schemas["definitions"].items(): + ui_config_schemas = models_json_schema( + [(UIConfigBase, "serialization"), (_InputField, "serialization"), (_OutputField, "serialization")], + ref_template="#/components/schemas/{model}", + ) + for schema_key, ui_config_schema in ui_config_schemas[1]["$defs"].items(): openapi_schema["components"]["schemas"][schema_key] = ui_config_schema # Add a reference to the output type to additionalProperties of the invoker schema for invoker in all_invocations: invoker_name = invoker.__name__ - output_type = signature(invoker.invoke).return_annotation + output_type = signature(obj=invoker.invoke).return_annotation output_type_title = output_type_titles[output_type.__name__] - invoker_schema = openapi_schema["components"]["schemas"][invoker_name] + invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"] outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"} invoker_schema["output"] = outputs_ref invoker_schema["class"] = "invocation" + openapi_schema["components"]["schemas"][f"{output_type_title}"]["class"] = "output" from invokeai.backend.model_management.models import get_model_config_enums @@ -172,7 +171,7 @@ def custom_openapi(): return app.openapi_schema -app.openapi = custom_openapi +app.openapi = custom_openapi # type: ignore [method-assign] # this 
is a valid assignment # Override API doc favicons app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], "static/dream_web")), name="static") diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py index b000abcf6a..c21c6315ed 100644 --- a/invokeai/app/cli/commands.py +++ b/invokeai/app/cli/commands.py @@ -24,8 +24,8 @@ def add_field_argument(command_parser, name: str, field, default_override=None): if field.default_factory is None else field.default_factory() ) - if get_origin(field.type_) == Literal: - allowed_values = get_args(field.type_) + if get_origin(field.annotation) == Literal: + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -38,15 +38,15 @@ def add_field_argument(command_parser, name: str, field, default_override=None): type=field_type, default=default, choices=allowed_values, - help=field.field_info.description, + help=field.description, ) else: command_parser.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - help=field.field_info.description, + help=field.description, ) @@ -142,7 +142,6 @@ class BaseCommand(ABC, BaseModel): """A CLI command""" # All commands must include a type name like this: - # type: Literal['your_command_name'] = 'your_command_name' @classmethod def get_all_subclasses(cls): diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index d82b94d0e9..8bd4a89f45 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -7,28 +7,16 @@ import re from abc import ABC, abstractmethod from enum import Enum from inspect import signature -from typing import ( - TYPE_CHECKING, - AbstractSet, - Any, - Callable, - ClassVar, - Literal, - Mapping, - Optional, - Type, - TypeVar, - Union, - get_args, - get_type_hints, -) +from types import UnionType +from typing import TYPE_CHECKING, Any, Callable, 
ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union import semver -from pydantic import BaseModel, Field, validator -from pydantic.fields import ModelField, Undefined -from pydantic.typing import NoArgAnyCallable +from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator +from pydantic.fields import _Unset +from pydantic_core import PydanticUndefined from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.util.misc import uuid_string if TYPE_CHECKING: from ..services.invocation_services import InvocationServices @@ -211,6 +199,11 @@ class _InputField(BaseModel): ui_choice_labels: Optional[dict[str, str]] item_default: Optional[Any] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + class _OutputField(BaseModel): """ @@ -224,34 +217,36 @@ class _OutputField(BaseModel): ui_type: Optional[UIType] ui_order: Optional[int] + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + + +def get_type(klass: BaseModel) -> str: + """Helper function to get an invocation or invocation output's type. 
This is the default value of the `type` field.""" + return klass.model_fields["type"].default + def InputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom input: Input = Input.Any, ui_type: Optional[UIType] = None, ui_component: Optional[UIComponent] = None, @@ -259,7 +254,6 @@ def InputField( ui_order: Optional[int] = None, ui_choice_labels: Optional[dict[str, str]] = None, item_default: Optional[Any] = None, - **kwargs: Any, ) -> Any: """ 
Creates an input field for an invocation. @@ -289,18 +283,26 @@ def InputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ : param bool item_default: [None] Specifies the default item value, if this is a collection input. \ - Ignored for non-collection fields.. + Ignored for non-collection fields. """ - return Field( - *args, + + json_schema_extra_: dict[str, Any] = dict( + input=input, + ui_type=ui_type, + ui_component=ui_component, + ui_hidden=ui_hidden, + ui_order=ui_order, + item_default=item_default, + ui_choice_labels=ui_choice_labels, + ) + + field_args = dict( default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -309,57 +311,92 @@ def InputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - input=input, - ui_type=ui_type, - ui_component=ui_component, - ui_hidden=ui_hidden, - ui_order=ui_order, - item_default=item_default, - ui_choice_labels=ui_choice_labels, - **kwargs, + ) + + """ + Invocation definitions have their fields typed correctly for their `invoke()` functions. + This typing is often more specific than the actual invocation definition requires, because + fields may have values provided only by connections. + + For example, consider an ResizeImageInvocation with an `image: ImageField` field. + + `image` is required during the call to `invoke()`, but when the python class is instantiated, + the field may not be present. This is fine, because that image field will be provided by a + an ancestor node that outputs the image. 
+ + So we'd like to type that `image` field as `Optional[ImageField]`. If we do that, however, then + we need to handle a lot of extra logic in the `invoke()` function to check if the field has a + value or not. This is very tedious. + + Ideally, the invocation definition would be able to specify that the field is required during + invocation, but optional during instantiation. So the field would be typed as `image: ImageField`, + but when calling the `invoke()` function, we raise an error if the field is not present. + + To do this, we need to do a bit of fanagling to make the pydantic field optional, and then do + extra validation when calling `invoke()`. + + There is some additional logic here to cleaning create the pydantic field via the wrapper. + """ + + # Filter out field args not provided + provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined} + + if (default is not PydanticUndefined) and (default_factory is not PydanticUndefined): + raise ValueError("Cannot specify both default and default_factory") + + # because we are manually making fields optional, we need to store the original required bool for reference later + if default is PydanticUndefined and default_factory is PydanticUndefined: + json_schema_extra_.update(dict(orig_required=True)) + else: + json_schema_extra_.update(dict(orig_required=False)) + + # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one + if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined: + default_ = None if default is PydanticUndefined else default + provided_args.update(dict(default=default_)) + if default is not PydanticUndefined: + # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value + json_schema_extra_.update(dict(default=default)) + json_schema_extra_.update(dict(orig_default=default)) + elif default is not 
PydanticUndefined and default_factory is PydanticUndefined: + default_ = default + provided_args.update(dict(default=default_)) + json_schema_extra_.update(dict(orig_default=default_)) + elif default_factory is not PydanticUndefined: + provided_args.update(dict(default_factory=default_factory)) + # TODO: cannot serialize default_factory... + # json_schema_extra_.update(dict(orig_default_factory=default_factory)) + + return Field( + **provided_args, + json_schema_extra=json_schema_extra_, ) def OutputField( - *args: Any, - default: Any = Undefined, - default_factory: Optional[NoArgAnyCallable] = None, - alias: Optional[str] = None, - title: Optional[str] = None, - description: Optional[str] = None, - exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None, - const: Optional[bool] = None, - gt: Optional[float] = None, - ge: Optional[float] = None, - lt: Optional[float] = None, - le: Optional[float] = None, - multiple_of: Optional[float] = None, - allow_inf_nan: Optional[bool] = None, - max_digits: Optional[int] = None, - decimal_places: Optional[int] = None, - min_items: Optional[int] = None, - max_items: Optional[int] = None, - unique_items: Optional[bool] = None, - min_length: Optional[int] = None, - max_length: Optional[int] = None, - allow_mutation: bool = True, - regex: Optional[str] = None, - discriminator: Optional[str] = None, - repr: bool = True, + # copied from pydantic's Field + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + 
decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom ui_type: Optional[UIType] = None, ui_hidden: bool = False, ui_order: Optional[int] = None, - **kwargs: Any, ) -> Any: """ Creates an output field for an invocation output. @@ -379,15 +416,12 @@ def OutputField( : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ """ return Field( - *args, default=default, default_factory=default_factory, - alias=alias, title=title, description=description, - exclude=exclude, - include=include, - const=const, + pattern=pattern, + strict=strict, gt=gt, ge=ge, lt=lt, @@ -396,19 +430,13 @@ def OutputField( allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, - min_items=min_items, - max_items=max_items, - unique_items=unique_items, min_length=min_length, max_length=max_length, - allow_mutation=allow_mutation, - regex=regex, - discriminator=discriminator, - repr=repr, - ui_type=ui_type, - ui_hidden=ui_hidden, - ui_order=ui_order, - **kwargs, + json_schema_extra=dict( + ui_type=ui_type, + ui_hidden=ui_hidden, + ui_order=ui_order, + ), ) @@ -422,7 +450,13 @@ class UIConfigBase(BaseModel): title: Optional[str] = Field(default=None, description="The node's display name") category: Optional[str] = Field(default=None, description="The node's category") version: Optional[str] = Field( - default=None, description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".' + default=None, + description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".', + ) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, ) @@ -457,23 +491,38 @@ class BaseInvocationOutput(BaseModel): All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
""" - @classmethod - def get_all_subclasses_tuple(cls): - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - return tuple(subclasses) + _output_classes: ClassVar[set[BaseInvocationOutput]] = set() - class Config: - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type"]) + @classmethod + def register_output(cls, output: BaseInvocationOutput) -> None: + cls._output_classes.add(output) + + @classmethod + def get_outputs(cls) -> Iterable[BaseInvocationOutput]: + return cls._output_classes + + @classmethod + def get_outputs_union(cls) -> UnionType: + outputs_union = Union[tuple(cls._output_classes)] # type: ignore [valid-type] + return outputs_union # type: ignore [return-value] + + @classmethod + def get_output_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs()) + + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Because we use a pydantic Literal field with default value for the invocation type, + # it will be typed as optional in the OpenAPI schema. Make it required manually. + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type"]) + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + json_schema_extra=json_schema_extra, + ) class RequiredConnectionException(Exception): @@ -498,104 +547,91 @@ class BaseInvocation(ABC, BaseModel): All invocations must use the `@invocation` decorator to provide their unique type. 
""" + _invocation_classes: ClassVar[set[BaseInvocation]] = set() + @classmethod - def get_all_subclasses(cls): + def register_invocation(cls, invocation: BaseInvocation) -> None: + cls._invocation_classes.add(invocation) + + @classmethod + def get_invocations_union(cls) -> UnionType: + invocations_union = Union[tuple(cls._invocation_classes)] # type: ignore [valid-type] + return invocations_union # type: ignore [return-value] + + @classmethod + def get_invocations(cls) -> Iterable[BaseInvocation]: app_config = InvokeAIAppConfig.get_config() - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - allowed_invocations = [] - for sc in subclasses: + allowed_invocations: set[BaseInvocation] = set() + for sc in cls._invocation_classes: + invocation_type = get_type(sc) is_in_allowlist = ( - sc.__fields__.get("type").default in app_config.allow_nodes - if isinstance(app_config.allow_nodes, list) - else True + invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True ) - is_in_denylist = ( - sc.__fields__.get("type").default in app_config.deny_nodes - if isinstance(app_config.deny_nodes, list) - else False + invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False ) - if is_in_allowlist and not is_in_denylist: - allowed_invocations.append(sc) + allowed_invocations.add(sc) return allowed_invocations @classmethod - def get_invocations(cls): - return tuple(BaseInvocation.get_all_subclasses()) - - @classmethod - def get_invocations_map(cls): + def get_invocations_map(cls) -> dict[str, BaseInvocation]: # Get the type strings out of the literals and into a dictionary return dict( map( - lambda t: (get_args(get_type_hints(t)["type"])[0], t), - BaseInvocation.get_all_subclasses(), + lambda i: (get_type(i), i), + BaseInvocation.get_invocations(), ) ) 
@classmethod - def get_output_type(cls): + def get_invocation_types(cls) -> Iterable[str]: + return map(lambda i: get_type(i), BaseInvocation.get_invocations()) + + @classmethod + def get_output_type(cls) -> BaseInvocationOutput: return signature(cls.invoke).return_annotation - class Config: - validate_assignment = True - validate_all = True - - @staticmethod - def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: - uiconfig = getattr(model_class, "UIConfig", None) - if uiconfig and hasattr(uiconfig, "title"): - schema["title"] = uiconfig.title - if uiconfig and hasattr(uiconfig, "tags"): - schema["tags"] = uiconfig.tags - if uiconfig and hasattr(uiconfig, "category"): - schema["category"] = uiconfig.category - if uiconfig and hasattr(uiconfig, "version"): - schema["version"] = uiconfig.version - if "required" not in schema or not isinstance(schema["required"], list): - schema["required"] = list() - schema["required"].extend(["type", "id"]) + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + # Add the various UI-facing attributes to the schema. These are used to build the invocation templates. 
+ uiconfig = getattr(model_class, "UIConfig", None) + if uiconfig and hasattr(uiconfig, "title"): + schema["title"] = uiconfig.title + if uiconfig and hasattr(uiconfig, "tags"): + schema["tags"] = uiconfig.tags + if uiconfig and hasattr(uiconfig, "category"): + schema["category"] = uiconfig.category + if uiconfig and hasattr(uiconfig, "version"): + schema["version"] = uiconfig.version + if "required" not in schema or not isinstance(schema["required"], list): + schema["required"] = list() + schema["required"].extend(["type", "id"]) @abstractmethod def invoke(self, context: InvocationContext) -> BaseInvocationOutput: """Invoke with provided context and return outputs.""" pass - def __init__(self, **data): - # nodes may have required fields, that can accept input from connections - # on instantiation of the model, we need to exclude these from validation - restore = dict() - try: - field_names = list(self.__fields__.keys()) - for field_name in field_names: - # if the field is required and may get its value from a connection, exclude it from validation - field = self.__fields__[field_name] - _input = field.field_info.extra.get("input", None) - if _input in [Input.Connection, Input.Any] and field.required: - if field_name not in data: - restore[field_name] = self.__fields__.pop(field_name) - # instantiate the node, which will validate the data - super().__init__(**data) - finally: - # restore the removed fields - for field_name, field in restore.items(): - self.__fields__[field_name] = field - def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput: - for field_name, field in self.__fields__.items(): - _input = field.field_info.extra.get("input", None) - if field.required and not hasattr(self, field_name): - if _input == Input.Connection: - raise RequiredConnectionException(self.__fields__["type"].default, field_name) - elif _input == Input.Any: - raise MissingInputException(self.__fields__["type"].default, field_name) + for field_name, field in 
self.model_fields.items(): + if not field.json_schema_extra or callable(field.json_schema_extra): + # something has gone terribly awry, we should always have this and it should be a dict + continue + + # Here we handle the case where the field is optional in the pydantic class, but required + # in the `invoke()` method. + + orig_default = field.json_schema_extra.get("orig_default", PydanticUndefined) + orig_required = field.json_schema_extra.get("orig_required", True) + input_ = field.json_schema_extra.get("input", None) + if orig_default is not PydanticUndefined and not hasattr(self, field_name): + setattr(self, field_name, orig_default) + if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None: + if input_ == Input.Connection: + raise RequiredConnectionException(self.model_fields["type"].default, field_name) + elif input_ == Input.Any: + raise MissingInputException(self.model_fields["type"].default, field_name) # skip node cache codepath if it's disabled if context.services.configuration.node_cache_size == 0: @@ -618,23 +654,31 @@ class BaseInvocation(ABC, BaseModel): return self.invoke(context) def get_type(self) -> str: - return self.__fields__["type"].default + return self.model_fields["type"].default id: str = Field( - description="The id of this instance of an invocation. Must be unique among all instances of invocations." + default_factory=uuid_string, + description="The id of this instance of an invocation. 
Must be unique among all instances of invocations.", ) - is_intermediate: bool = InputField( - default=False, description="Whether or not this is an intermediate invocation.", ui_type=UIType.IsIntermediate + is_intermediate: Optional[bool] = Field( + default=False, + description="Whether or not this is an intermediate invocation.", + json_schema_extra=dict(ui_type=UIType.IsIntermediate), ) - workflow: Optional[str] = InputField( + workflow: Optional[str] = Field( default=None, description="The workflow to save with the image", - ui_type=UIType.WorkflowField, + json_schema_extra=dict(ui_type=UIType.WorkflowField), + ) + use_cache: Optional[bool] = Field( + default=True, + description="Whether or not to use the cache", ) - use_cache: bool = InputField(default=True, description="Whether or not to use the cache") - @validator("workflow", pre=True) + @field_validator("workflow", mode="before") + @classmethod def validate_workflow_is_json(cls, v): + """We don't have a workflow schema in the backend, so we just check that it's valid JSON""" if v is None: return None try: @@ -645,8 +689,14 @@ class BaseInvocation(ABC, BaseModel): UIConfig: ClassVar[Type[UIConfigBase]] + model_config = ConfigDict( + validate_assignment=True, + json_schema_extra=json_schema_extra, + json_schema_serialization_defaults_required=True, + ) -GenericBaseInvocation = TypeVar("GenericBaseInvocation", bound=BaseInvocation) + +TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation) def invocation( @@ -656,7 +706,7 @@ def invocation( category: Optional[str] = None, version: Optional[str] = None, use_cache: Optional[bool] = True, -) -> Callable[[Type[GenericBaseInvocation]], Type[GenericBaseInvocation]]: +) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]: """ Adds metadata to an invocation. @@ -668,12 +718,15 @@ def invocation( :param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor. 
""" - def wrapper(cls: Type[GenericBaseInvocation]) -> Type[GenericBaseInvocation]: + def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: # Validate invocation types on creation of invocation classes # TODO: ensure unique? if re.compile(r"^\S+$").match(invocation_type) is None: raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"') + if invocation_type in BaseInvocation.get_invocation_types(): + raise ValueError(f'Invocation type "{invocation_type}" already exists') + # Add OpenAPI schema extras uiconf_name = cls.__qualname__ + ".UIConfig" if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name: @@ -691,59 +744,83 @@ def invocation( raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e cls.UIConfig.version = version if use_cache is not None: - cls.__fields__["use_cache"].default = use_cache + cls.model_fields["use_cache"].default = use_cache + + # Add the invocation type to the model. + + # You'd be tempted to just add the type field and rebuild the model, like this: + # cls.model_fields.update(type=FieldInfo.from_annotated_attribute(Literal[invocation_type], invocation_type)) + # cls.model_rebuild() or cls.model_rebuild(force=True) + + # Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does + # not work. Instead, we have to create a new class with the type field and patch the original class with it. 
- # Add the invocation type to the pydantic model of the invocation invocation_type_annotation = Literal[invocation_type] # type: ignore - invocation_type_field = ModelField.infer( - name="type", - value=invocation_type, - annotation=invocation_type_annotation, - class_validators=None, - config=cls.__config__, + invocation_type_field = Field( + title="type", + default=invocation_type, ) - cls.__fields__.update({"type": invocation_type_field}) - # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html - if annotations := cls.__dict__.get("__annotations__", None): - annotations.update({"type": invocation_type_annotation}) + + docstring = cls.__doc__ + cls = create_model( + cls.__qualname__, + __base__=cls, + __module__=cls.__module__, + type=(invocation_type_annotation, invocation_type_field), + ) + cls.__doc__ = docstring + + # TODO: how to type this correctly? it's typed as ModelMetaclass, a private class in pydantic + BaseInvocation.register_invocation(cls) # type: ignore + return cls return wrapper -GenericBaseInvocationOutput = TypeVar("GenericBaseInvocationOutput", bound=BaseInvocationOutput) +TBaseInvocationOutput = TypeVar("TBaseInvocationOutput", bound=BaseInvocationOutput) def invocation_output( output_type: str, -) -> Callable[[Type[GenericBaseInvocationOutput]], Type[GenericBaseInvocationOutput]]: +) -> Callable[[Type[TBaseInvocationOutput]], Type[TBaseInvocationOutput]]: """ Adds metadata to an invocation output. :param str output_type: The type of the invocation output. Must be unique among all invocation outputs. """ - def wrapper(cls: Type[GenericBaseInvocationOutput]) -> Type[GenericBaseInvocationOutput]: + def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]: # Validate output types on creation of invocation output classes # TODO: ensure unique? 
if re.compile(r"^\S+$").match(output_type) is None: raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"') - # Add the output type to the pydantic model of the invocation output - output_type_annotation = Literal[output_type] # type: ignore - output_type_field = ModelField.infer( - name="type", - value=output_type, - annotation=output_type_annotation, - class_validators=None, - config=cls.__config__, - ) - cls.__fields__.update({"type": output_type_field}) + if output_type in BaseInvocationOutput.get_output_types(): + raise ValueError(f'Invocation type "{output_type}" already exists') - # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html - if annotations := cls.__dict__.get("__annotations__", None): - annotations.update({"type": output_type_annotation}) + # Add the output type to the model. + + output_type_annotation = Literal[output_type] # type: ignore + output_type_field = Field( + title="type", + default=output_type, + ) + + docstring = cls.__doc__ + cls = create_model( + cls.__qualname__, + __base__=cls, + __module__=cls.__module__, + type=(output_type_annotation, output_type_field), + ) + cls.__doc__ = docstring + + BaseInvocationOutput.register_output(cls) # type: ignore # TODO: how to type this correctly? 
return cls return wrapper + + +GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py index 83863422f8..f26eebe1ff 100644 --- a/invokeai/app/invocations/collections.py +++ b/invokeai/app/invocations/collections.py @@ -2,7 +2,7 @@ import numpy as np -from pydantic import validator +from pydantic import ValidationInfo, field_validator from invokeai.app.invocations.primitives import IntegerCollectionOutput from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -20,9 +20,9 @@ class RangeInvocation(BaseInvocation): stop: int = InputField(default=10, description="The stop of the range") step: int = InputField(default=1, description="The step of the range") - @validator("stop") - def stop_gt_start(cls, v, values): - if "start" in values and v <= values["start"]: + @field_validator("stop") + def stop_gt_start(cls, v: int, info: ValidationInfo): + if "start" in info.data and v <= info.data["start"]: raise ValueError("stop must be greater than start") return v diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index b2634c2c56..b3ebc92320 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -1,6 +1,6 @@ import re from dataclasses import dataclass -from typing import List, Union +from typing import List, Optional, Union import torch from compel import Compel, ReturnedEmbeddingsType @@ -43,7 +43,13 @@ class ConditioningFieldData: # PerpNeg = "perp_neg" -@invocation("compel", title="Prompt", tags=["prompt", "compel"], category="conditioning", version="1.0.0") +@invocation( + "compel", + title="Prompt", + tags=["prompt", "compel"], + category="conditioning", + version="1.0.0", +) class CompelInvocation(BaseInvocation): """Parse prompt using compel package to conditioning.""" @@ -61,17 +67,19 @@ class CompelInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) 
-> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), context=context, ) def _lora_loader(): for lora in self.clip.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -160,11 +168,11 @@ class SDXLPromptInvocationBase: zero_on_empty: bool, ): tokenizer_info = context.services.model_manager.get_model( - **clip_field.tokenizer.dict(), + **clip_field.tokenizer.model_dump(), context=context, ) text_encoder_info = context.services.model_manager.get_model( - **clip_field.text_encoder.dict(), + **clip_field.text_encoder.model_dump(), context=context, ) @@ -172,7 +180,11 @@ class SDXLPromptInvocationBase: if prompt == "" and zero_on_empty: cpu_text_encoder = text_encoder_info.context.model c = torch.zeros( - (1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size), + ( + 1, + cpu_text_encoder.config.max_position_embeddings, + cpu_text_encoder.config.hidden_size, + ), dtype=text_encoder_info.context.cache.precision, ) if get_pooled: @@ -186,7 +198,9 @@ class SDXLPromptInvocationBase: def _lora_loader(): for lora in clip_field.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) + lora_info = context.services.model_manager.get_model( + **lora.model_dump(exclude={"weight"}), context=context + ) yield (lora_info.context.model, lora.weight) del lora_info return @@ -273,8 +287,16 @@ class SDXLPromptInvocationBase: class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): """Parse 
prompt using compel package to conditioning.""" - prompt: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) - style: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea) + prompt: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) + style: str = InputField( + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, + ) original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") crop_top: int = InputField(default=0, description="") @@ -310,7 +332,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c1, torch.zeros( - (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype + (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), + device=c1.device, + dtype=c1.dtype, ), ], dim=1, @@ -321,7 +345,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): [ c2, torch.zeros( - (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype + (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), + device=c2.device, + dtype=c2.dtype, ), ], dim=1, @@ -359,7 +385,9 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase """Parse prompt using compel package to conditioning.""" style: str = InputField( - default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea + default="", + description=FieldDescriptions.compel_prompt, + ui_component=UIComponent.Textarea, ) # TODO: ? 
original_width: int = InputField(default=1024, description="") original_height: int = InputField(default=1024, description="") @@ -403,10 +431,16 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase class ClipSkipInvocationOutput(BaseInvocationOutput): """Clip skip node output""" - clip: ClipField = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") + clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") -@invocation("clip_skip", title="CLIP Skip", tags=["clipskip", "clip", "skip"], category="conditioning", version="1.0.0") +@invocation( + "clip_skip", + title="CLIP Skip", + tags=["clipskip", "clip", "skip"], + category="conditioning", + version="1.0.0", +) class ClipSkipInvocation(BaseInvocation): """Skip layers in clip text_encoder model.""" @@ -421,7 +455,9 @@ class ClipSkipInvocation(BaseInvocation): def get_max_token_count( - tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False + tokenizer, + prompt: Union[FlattenedPrompt, Blend, Conjunction], + truncate_if_too_long=False, ) -> int: if type(prompt) is Blend: blend: Blend = prompt diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 59a36935df..200c37d851 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -2,7 +2,7 @@ # initial implementation by Gregg Helt, 2023 # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux from builtins import bool, float -from typing import Dict, List, Literal, Optional, Union +from typing import Dict, List, Literal, Union import cv2 import numpy as np @@ -24,7 +24,7 @@ from controlnet_aux import ( ) from controlnet_aux.util import HWC3, ade_palette from PIL import Image -from pydantic import BaseModel, Field, validator +from pydantic import 
BaseModel, ConfigDict, Field, field_validator from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin @@ -57,6 +57,8 @@ class ControlNetModelField(BaseModel): model_name: str = Field(description="Name of the ControlNet model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class ControlField(BaseModel): image: ImageField = Field(description="The control image") @@ -71,7 +73,7 @@ class ControlField(BaseModel): control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use") resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use") - @validator("control_weight") + @field_validator("control_weight") def validate_control_weight(cls, v): """Validate that all control weights in the valid range""" if isinstance(v, list): @@ -124,9 +126,7 @@ class ControlNetInvocation(BaseInvocation): ) -@invocation( - "image_processor", title="Base Image Processor", tags=["controlnet"], category="controlnet", version="1.0.0" -) +# This invocation exists for other invocations to subclass it - do not register with @invocation! 
class ImageProcessorInvocation(BaseInvocation): """Base class for invocations that preprocess images for ControlNet""" @@ -393,9 +393,9 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) - h: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `h` parameter") - w: Optional[int] = InputField(default=512, ge=0, description="Content shuffle `w` parameter") - f: Optional[int] = InputField(default=256, ge=0, description="Content shuffle `f` parameter") + h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter") + w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter") + f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter") def run_processor(self, image): content_shuffle_processor = ContentShuffleDetector() @@ -575,14 +575,14 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation): def run_processor(self, image: Image.Image): image = image.convert("RGB") - image = np.array(image, dtype=np.uint8) - height, width = image.shape[:2] + np_image = np.array(image, dtype=np.uint8) + height, width = np_image.shape[:2] width_tile_size = min(self.color_map_tile_size, width) height_tile_size = min(self.color_map_tile_size, height) color_map = cv2.resize( - image, + np_image, (width // width_tile_size, height // height_tile_size), interpolation=cv2.INTER_CUBIC, ) diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index 31ab77bd1a..40e15e9476 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -8,7 +8,7 @@ import numpy as np from mediapipe.python.solutions.face_mesh import FaceMesh # type: ignore[import] from PIL import Image, ImageDraw, ImageFilter, ImageFont, ImageOps 
from PIL.Image import Image as ImageType -from pydantic import validator +from pydantic import field_validator import invokeai.assets.fonts as font_assets from invokeai.app.invocations.baseinvocation import ( @@ -550,7 +550,7 @@ class FaceMaskInvocation(BaseInvocation): ) invert_mask: bool = InputField(default=False, description="Toggle to invert the mask") - @validator("face_ids") + @field_validator("face_ids") def validate_comma_separated_ints(cls, v) -> str: comma_separated_ints_regex = re.compile(r"^\d*(,\d+)*$") if comma_separated_ints_regex.match(v) is None: diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 2d59a567c0..3a4f4eadac 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -36,7 +36,13 @@ class ShowImageInvocation(BaseInvocation): ) -@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0") +@invocation( + "blank_image", + title="Blank Image", + tags=["image"], + category="image", + version="1.0.0", +) class BlankImageInvocation(BaseInvocation): """Creates a blank image and forwards it to the pipeline""" @@ -65,7 +71,13 @@ class BlankImageInvocation(BaseInvocation): ) -@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0") +@invocation( + "img_crop", + title="Crop Image", + tags=["image", "crop"], + category="image", + version="1.0.0", +) class ImageCropInvocation(BaseInvocation): """Crops an image to a specified box. 
The box can be outside of the image.""" @@ -98,7 +110,13 @@ class ImageCropInvocation(BaseInvocation): ) -@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1") +@invocation( + "img_paste", + title="Paste Image", + tags=["image", "paste"], + category="image", + version="1.0.1", +) class ImagePasteInvocation(BaseInvocation): """Pastes an image into another image.""" @@ -151,7 +169,13 @@ class ImagePasteInvocation(BaseInvocation): ) -@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0") +@invocation( + "tomask", + title="Mask from Alpha", + tags=["image", "mask"], + category="image", + version="1.0.0", +) class MaskFromAlphaInvocation(BaseInvocation): """Extracts the alpha channel of an image as a mask.""" @@ -182,7 +206,13 @@ class MaskFromAlphaInvocation(BaseInvocation): ) -@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0") +@invocation( + "img_mul", + title="Multiply Images", + tags=["image", "multiply"], + category="image", + version="1.0.0", +) class ImageMultiplyInvocation(BaseInvocation): """Multiplies two images together using `PIL.ImageChops.multiply()`.""" @@ -215,7 +245,13 @@ class ImageMultiplyInvocation(BaseInvocation): IMAGE_CHANNELS = Literal["A", "R", "G", "B"] -@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0") +@invocation( + "img_chan", + title="Extract Image Channel", + tags=["image", "channel"], + category="image", + version="1.0.0", +) class ImageChannelInvocation(BaseInvocation): """Gets a channel from an image.""" @@ -247,7 +283,13 @@ class ImageChannelInvocation(BaseInvocation): IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] -@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0") +@invocation( + "img_conv", + title="Convert 
Image Mode", + tags=["image", "convert"], + category="image", + version="1.0.0", +) class ImageConvertInvocation(BaseInvocation): """Converts an image to a different mode.""" @@ -276,7 +318,13 @@ class ImageConvertInvocation(BaseInvocation): ) -@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0") +@invocation( + "img_blur", + title="Blur Image", + tags=["image", "blur"], + category="image", + version="1.0.0", +) class ImageBlurInvocation(BaseInvocation): """Blurs an image""" @@ -330,7 +378,13 @@ PIL_RESAMPLING_MAP = { } -@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0") +@invocation( + "img_resize", + title="Resize Image", + tags=["image", "resize"], + category="image", + version="1.0.0", +) class ImageResizeInvocation(BaseInvocation): """Resizes an image to specific dimensions""" @@ -359,7 +413,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -370,7 +424,13 @@ class ImageResizeInvocation(BaseInvocation): ) -@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0") +@invocation( + "img_scale", + title="Scale Image", + tags=["image", "scale"], + category="image", + version="1.0.0", +) class ImageScaleInvocation(BaseInvocation): """Scales an image by a factor""" @@ -411,7 +471,13 @@ class ImageScaleInvocation(BaseInvocation): ) -@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0") +@invocation( + "img_lerp", + title="Lerp Image", + tags=["image", "lerp"], + category="image", + version="1.0.0", +) class ImageLerpInvocation(BaseInvocation): """Linear interpolation of all pixels of an image""" @@ -444,7 +510,13 @@ 
class ImageLerpInvocation(BaseInvocation): ) -@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0") +@invocation( + "img_ilerp", + title="Inverse Lerp Image", + tags=["image", "ilerp"], + category="image", + version="1.0.0", +) class ImageInverseLerpInvocation(BaseInvocation): """Inverse linear interpolation of all pixels of an image""" @@ -456,7 +528,7 @@ class ImageInverseLerpInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) image_arr = numpy.asarray(image, dtype=numpy.float32) - image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 + image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 # type: ignore [assignment] ilerp_image = Image.fromarray(numpy.uint8(image_arr)) @@ -477,7 +549,13 @@ class ImageInverseLerpInvocation(BaseInvocation): ) -@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0") +@invocation( + "img_nsfw", + title="Blur NSFW Image", + tags=["image", "nsfw"], + category="image", + version="1.0.0", +) class ImageNSFWBlurInvocation(BaseInvocation): """Add blur to NSFW-flagged images""" @@ -505,7 +583,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -515,7 +593,7 @@ class ImageNSFWBlurInvocation(BaseInvocation): height=image_dto.height, ) - def _get_caution_img(self) -> Image: + def _get_caution_img(self) -> Image.Image: import invokeai.app.assets.images as image_assets caution = Image.open(Path(image_assets.__path__[0]) / "caution.png") @@ -523,7 +601,11 @@ class ImageNSFWBlurInvocation(BaseInvocation): @invocation( - "img_watermark", 
title="Add Invisible Watermark", tags=["image", "watermark"], category="image", version="1.0.0" + "img_watermark", + title="Add Invisible Watermark", + tags=["image", "watermark"], + category="image", + version="1.0.0", ) class ImageWatermarkInvocation(BaseInvocation): """Add an invisible watermark to an image""" @@ -544,7 +626,7 @@ class ImageWatermarkInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -555,7 +637,13 @@ class ImageWatermarkInvocation(BaseInvocation): ) -@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0") +@invocation( + "mask_edge", + title="Mask Edge", + tags=["image", "mask", "inpaint"], + category="image", + version="1.0.0", +) class MaskEdgeInvocation(BaseInvocation): """Applies an edge mask to an image""" @@ -601,7 +689,11 @@ class MaskEdgeInvocation(BaseInvocation): @invocation( - "mask_combine", title="Combine Masks", tags=["image", "mask", "multiply"], category="image", version="1.0.0" + "mask_combine", + title="Combine Masks", + tags=["image", "mask", "multiply"], + category="image", + version="1.0.0", ) class MaskCombineInvocation(BaseInvocation): """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" @@ -632,7 +724,13 @@ class MaskCombineInvocation(BaseInvocation): ) -@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0") +@invocation( + "color_correct", + title="Color Correct", + tags=["image", "color"], + category="image", + version="1.0.0", +) class ColorCorrectInvocation(BaseInvocation): """ Shifts the colors of a target image to match the reference image, optionally @@ -742,7 +840,13 @@ class ColorCorrectInvocation(BaseInvocation): ) 
-@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0") +@invocation( + "img_hue_adjust", + title="Adjust Image Hue", + tags=["image", "hue"], + category="image", + version="1.0.0", +) class ImageHueAdjustmentInvocation(BaseInvocation): """Adjusts the Hue of an image.""" @@ -980,7 +1084,7 @@ class SaveImageInvocation(BaseInvocation): image: ImageField = InputField(description=FieldDescriptions.image) board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -997,7 +1101,7 @@ class SaveImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 3e3a3d9b1f..81fd1f9f5d 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -2,7 +2,7 @@ import os from builtins import float from typing import List, Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -25,11 +25,15 @@ class IPAdapterModelField(BaseModel): model_name: str = Field(description="Name of the IP-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class CLIPVisionModelField(BaseModel): model_name: str = Field(description="Name of the CLIP Vision image encoder model") base_model: BaseModelType = Field(description="Base model (usually 'Any')") + model_config = 
ConfigDict(protected_namespaces=()) + class IPAdapterField(BaseModel): image: ImageField = Field(description="The IP-Adapter image prompt.") diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 7ca8cbbe6c..7ce0ae7a8a 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -19,7 +19,7 @@ from diffusers.models.attention_processor import ( ) from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler -from pydantic import validator +from pydantic import field_validator from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.ip_adapter import IPAdapterField @@ -84,12 +84,20 @@ class SchedulerOutput(BaseInvocationOutput): scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler) -@invocation("scheduler", title="Scheduler", tags=["scheduler"], category="latents", version="1.0.0") +@invocation( + "scheduler", + title="Scheduler", + tags=["scheduler"], + category="latents", + version="1.0.0", +) class SchedulerInvocation(BaseInvocation): """Selects a scheduler.""" scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) def invoke(self, context: InvocationContext) -> SchedulerOutput: @@ -97,7 +105,11 @@ class SchedulerInvocation(BaseInvocation): @invocation( - "create_denoise_mask", title="Create Denoise Mask", tags=["mask", "denoise"], category="latents", version="1.0.0" + "create_denoise_mask", + title="Create Denoise Mask", + tags=["mask", "denoise"], + category="latents", + version="1.0.0", ) class CreateDenoiseMaskInvocation(BaseInvocation): """Creates mask for denoising model run.""" @@ -106,7 +118,11 @@ class CreateDenoiseMaskInvocation(BaseInvocation): image: Optional[ImageField] = 
InputField(default=None, description="Image which will be masked", ui_order=1) mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) - fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4) + fp32: bool = InputField( + default=DEFAULT_PRECISION == "float32", + description=FieldDescriptions.fp32, + ui_order=4, + ) def prep_mask_tensor(self, mask_image): if mask_image.mode != "L": @@ -134,7 +150,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation): if image is not None: vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -167,7 +183,7 @@ def get_scheduler( ) -> Scheduler: scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"]) orig_scheduler_info = context.services.model_manager.get_model( - **scheduler_info.dict(), + **scheduler_info.model_dump(), context=context, ) with orig_scheduler_info as orig_scheduler: @@ -209,34 +225,64 @@ class DenoiseLatentsInvocation(BaseInvocation): negative_conditioning: ConditioningField = InputField( description=FieldDescriptions.negative_cond, input=Input.Connection, ui_order=1 ) - noise: Optional[LatentsField] = InputField(description=FieldDescriptions.noise, input=Input.Connection, ui_order=3) + noise: Optional[LatentsField] = InputField( + default=None, + description=FieldDescriptions.noise, + input=Input.Connection, + ui_order=3, + ) steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps) cfg_scale: Union[float, List[float]] = InputField( default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale" ) - denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start) + denoising_start: float = InputField( + default=0.0, + ge=0, + le=1, + 
description=FieldDescriptions.denoising_start, + ) denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end) scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler + default="euler", + description=FieldDescriptions.scheduler, + ui_type=UIType.Scheduler, ) - unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet", ui_order=2) - control: Union[ControlField, list[ControlField]] = InputField( + unet: UNetField = InputField( + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", + ui_order=2, + ) + control: Optional[Union[ControlField, list[ControlField]]] = InputField( default=None, input=Input.Connection, ui_order=5, ) ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]] = InputField( - description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection, ui_order=6 + description=FieldDescriptions.ip_adapter, + title="IP-Adapter", + default=None, + input=Input.Connection, + ui_order=6, ) - t2i_adapter: Union[T2IAdapterField, list[T2IAdapterField]] = InputField( - description=FieldDescriptions.t2i_adapter, title="T2I-Adapter", default=None, input=Input.Connection, ui_order=7 + t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]] = InputField( + description=FieldDescriptions.t2i_adapter, + title="T2I-Adapter", + default=None, + input=Input.Connection, + ui_order=7, + ) + latents: Optional[LatentsField] = InputField( + default=None, description=FieldDescriptions.latents, input=Input.Connection ) - latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) denoise_mask: Optional[DenoiseMaskField] = InputField( - default=None, description=FieldDescriptions.mask, input=Input.Connection, ui_order=8 + default=None, + description=FieldDescriptions.mask, + input=Input.Connection, + 
ui_order=8, ) - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all cfg_scale values are >= 1""" if isinstance(v, list): @@ -259,7 +305,7 @@ class DenoiseLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, base_model=base_model, ) @@ -451,9 +497,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments. with image_encoder_model_info as image_encoder_model: # Get image embeddings from CLIP and ImageProjModel. - image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds( - input_image, image_encoder_model - ) + ( + image_prompt_embeds, + uncond_image_prompt_embeds, + ) = ip_adapter_model.get_image_embeds(input_image, image_encoder_model) conditioning_data.ip_adapter_conditioning.append( IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds) ) @@ -628,7 +675,10 @@ class DenoiseLatentsInvocation(BaseInvocation): # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets, # below. Investigate whether this is appropriate. 
t2i_adapter_data = self.run_t2i_adapters( - context, self.t2i_adapter, latents.shape, do_classifier_free_guidance=True + context, + self.t2i_adapter, + latents.shape, + do_classifier_free_guidance=True, ) # Get the source node id (we are invoking the prepared node) @@ -641,7 +691,7 @@ class DenoiseLatentsInvocation(BaseInvocation): def _lora_loader(): for lora in self.unet.loras: lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), + **lora.model_dump(exclude={"weight"}), context=context, ) yield (lora_info.context.model, lora.weight) @@ -649,7 +699,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), + **self.unet.unet.model_dump(), context=context, ) with ( @@ -700,7 +750,10 @@ class DenoiseLatentsInvocation(BaseInvocation): denoising_end=self.denoising_end, ) - result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( + ( + result_latents, + result_attention_map_saver, + ) = pipeline.latents_from_embeddings( latents=latents, timesteps=timesteps, init_timestep=init_timestep, @@ -728,7 +781,11 @@ class DenoiseLatentsInvocation(BaseInvocation): @invocation( - "l2i", title="Latents to Image", tags=["latents", "image", "vae", "l2i"], category="latents", version="1.0.0" + "l2i", + title="Latents to Image", + tags=["latents", "image", "vae", "l2i"], + category="latents", + version="1.0.0", ) class LatentsToImageInvocation(BaseInvocation): """Generates an image from latents.""" @@ -743,7 +800,7 @@ class LatentsToImageInvocation(BaseInvocation): ) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - metadata: CoreMetadata = InputField( + metadata: Optional[CoreMetadata] = InputField( default=None, description=FieldDescriptions.core_metadata, ui_hidden=True, @@ -754,7 +811,7 @@ class 
LatentsToImageInvocation(BaseInvocation): latents = context.services.latents.get(self.latents.latents_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -816,7 +873,7 @@ class LatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -830,7 +887,13 @@ class LatentsToImageInvocation(BaseInvocation): LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"] -@invocation("lresize", title="Resize Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lresize", + title="Resize Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ResizeLatentsInvocation(BaseInvocation): """Resizes latents to explicit width/height (in pixels). 
Provided dimensions are floor-divided by 8.""" @@ -876,7 +939,13 @@ class ResizeLatentsInvocation(BaseInvocation): return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) -@invocation("lscale", title="Scale Latents", tags=["latents", "resize"], category="latents", version="1.0.0") +@invocation( + "lscale", + title="Scale Latents", + tags=["latents", "resize"], + category="latents", + version="1.0.0", +) class ScaleLatentsInvocation(BaseInvocation): """Scales latents by a given factor.""" @@ -915,7 +984,11 @@ class ScaleLatentsInvocation(BaseInvocation): @invocation( - "i2l", title="Image to Latents", tags=["latents", "image", "vae", "i2l"], category="latents", version="1.0.0" + "i2l", + title="Image to Latents", + tags=["latents", "image", "vae", "i2l"], + category="latents", + version="1.0.0", ) class ImageToLatentsInvocation(BaseInvocation): """Encodes an image into latents.""" @@ -979,7 +1052,7 @@ class ImageToLatentsInvocation(BaseInvocation): image = context.services.images.get_pil_image(self.image.image_name) vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), context=context, ) @@ -1007,7 +1080,13 @@ class ImageToLatentsInvocation(BaseInvocation): return vae.encode(image_tensor).latents -@invocation("lblend", title="Blend Latents", tags=["latents", "blend"], category="latents", version="1.0.0") +@invocation( + "lblend", + title="Blend Latents", + tags=["latents", "blend"], + category="latents", + version="1.0.0", +) class BlendLatentsInvocation(BaseInvocation): """Blend two latents using a given alpha. 
Latents must have same size.""" diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index b52cbb28bf..2aefa1def4 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -3,7 +3,7 @@ from typing import Literal import numpy as np -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput @@ -72,7 +72,14 @@ class RandomIntInvocation(BaseInvocation): return IntegerOutput(value=np.random.randint(self.low, self.high)) -@invocation("rand_float", title="Random Float", tags=["math", "float", "random"], category="math", version="1.0.0") +@invocation( + "rand_float", + title="Random Float", + tags=["math", "float", "random"], + category="math", + version="1.0.1", + use_cache=False, +) class RandomFloatInvocation(BaseInvocation): """Outputs a single random float""" @@ -178,7 +185,7 @@ class IntegerMathInvocation(BaseInvocation): a: int = InputField(default=0, description=FieldDescriptions.num_1) b: int = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") @@ -252,7 +259,7 @@ class FloatMathInvocation(BaseInvocation): a: float = InputField(default=0, description=FieldDescriptions.num_1) b: float = InputField(default=0, description=FieldDescriptions.num_2) - @validator("b") + @field_validator("b") def no_unrepresentable_results(cls, v, values): if values["operation"] == "DIV" and v == 0: raise ValueError("Cannot divide by zero") diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 449f332387..9578fc3ae9 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -223,4 +223,4 @@ class MetadataAccumulatorInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> 
MetadataAccumulatorOutput: """Collects and outputs a CoreMetadata object""" - return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict())) + return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.model_dump())) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 571cb2e730..dfa1075d6e 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -1,7 +1,7 @@ import copy from typing import List, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from ...backend.model_management import BaseModelType, ModelType, SubModelType from .baseinvocation import ( @@ -24,6 +24,8 @@ class ModelInfo(BaseModel): model_type: ModelType = Field(description="Info to load submodel") submodel: Optional[SubModelType] = Field(default=None, description="Info to load submodel") + model_config = ConfigDict(protected_namespaces=()) + class LoraInfo(ModelInfo): weight: float = Field(description="Lora's weight which to use when apply to model") @@ -65,6 +67,8 @@ class MainModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + class LoRAModelField(BaseModel): """LoRA model field""" @@ -72,8 +76,16 @@ class LoRAModelField(BaseModel): model_name: str = Field(description="Name of the LoRA model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) -@invocation("main_model_loader", title="Main Model", tags=["model"], category="model", version="1.0.0") + +@invocation( + "main_model_loader", + title="Main Model", + tags=["model"], + category="model", + version="1.0.0", +) class MainModelLoaderInvocation(BaseInvocation): """Loads a main model, outputting its submodels.""" @@ -180,10 +192,16 @@ class LoraLoaderInvocation(BaseInvocation): lora: LoRAModelField = 
InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP", ) def invoke(self, context: InvocationContext) -> LoraLoaderOutput: @@ -244,20 +262,35 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput): clip2: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2") -@invocation("sdxl_lora_loader", title="SDXL LoRA", tags=["lora", "model"], category="model", version="1.0.0") +@invocation( + "sdxl_lora_loader", + title="SDXL LoRA", + tags=["lora", "model"], + category="model", + version="1.0.0", +) class SDXLLoraLoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + title="UNet", ) clip: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1" + default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 1", ) clip2: Optional[ClipField] = InputField( - default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2" + 
default=None, + description=FieldDescriptions.clip, + input=Input.Connection, + title="CLIP 2", ) def invoke(self, context: InvocationContext) -> SDXLLoraLoaderOutput: @@ -330,6 +363,8 @@ class VAEModelField(BaseModel): model_name: str = Field(description="Name of the model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + @invocation_output("vae_loader_output") class VaeLoaderOutput(BaseInvocationOutput): @@ -343,7 +378,10 @@ class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" vae_model: VAEModelField = InputField( - description=FieldDescriptions.vae_model, input=Input.Direct, ui_type=UIType.VaeModel, title="VAE" + description=FieldDescriptions.vae_model, + input=Input.Direct, + ui_type=UIType.VaeModel, + title="VAE", ) def invoke(self, context: InvocationContext) -> VaeLoaderOutput: @@ -372,19 +410,31 @@ class VaeLoaderInvocation(BaseInvocation): class SeamlessModeOutput(BaseInvocationOutput): """Modified Seamless Model output""" - unet: Optional[UNetField] = OutputField(description=FieldDescriptions.unet, title="UNet") - vae: Optional[VaeField] = OutputField(description=FieldDescriptions.vae, title="VAE") + unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") + vae: Optional[VaeField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE") -@invocation("seamless", title="Seamless", tags=["seamless", "model"], category="model", version="1.0.0") +@invocation( + "seamless", + title="Seamless", + tags=["seamless", "model"], + category="model", + version="1.0.0", +) class SeamlessModeInvocation(BaseInvocation): """Applies the seamless transformation to the Model UNet and VAE.""" unet: Optional[UNetField] = InputField( - default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + default=None, + description=FieldDescriptions.unet, + input=Input.Connection, + 
title="UNet", ) vae: Optional[VaeField] = InputField( - default=None, description=FieldDescriptions.vae_model, input=Input.Connection, title="VAE" + default=None, + description=FieldDescriptions.vae_model, + input=Input.Connection, + title="VAE", ) seamless_y: bool = InputField(default=True, input=Input.Any, description="Specify whether Y axis is seamless") seamless_x: bool = InputField(default=True, input=Input.Any, description="Specify whether X axis is seamless") diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index c46747aa89..3c1651a2f0 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -2,7 +2,7 @@ import torch -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.latent import LatentsField from invokeai.app.util.misc import SEED_MAX, get_random_seed @@ -65,7 +65,7 @@ Nodes class NoiseOutput(BaseInvocationOutput): """Invocation noise output""" - noise: LatentsField = OutputField(default=None, description=FieldDescriptions.noise) + noise: LatentsField = OutputField(description=FieldDescriptions.noise) width: int = OutputField(description=FieldDescriptions.width) height: int = OutputField(description=FieldDescriptions.height) @@ -78,7 +78,13 @@ def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int): ) -@invocation("noise", title="Noise", tags=["latents", "noise"], category="latents", version="1.0.0") +@invocation( + "noise", + title="Noise", + tags=["latents", "noise"], + category="latents", + version="1.0.0", +) class NoiseInvocation(BaseInvocation): """Generates latent noise.""" @@ -105,7 +111,7 @@ class NoiseInvocation(BaseInvocation): description="Use CPU for noise generation (for reproducible results across platforms)", ) - @validator("seed", pre=True) + @field_validator("seed", mode="before") def modulo_seed(cls, v): """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range.""" return v % 
(SEED_MAX + 1) diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index 35f8ed965e..3f4f688cf4 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@ -9,7 +9,7 @@ from typing import List, Literal, Optional, Union import numpy as np import torch from diffusers.image_processor import VaeImageProcessor -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from tqdm import tqdm from invokeai.app.invocations.metadata import CoreMetadata @@ -63,14 +63,17 @@ class ONNXPromptInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> ConditioningOutput: tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.dict(), + **self.clip.tokenizer.model_dump(), ) text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.dict(), + **self.clip.text_encoder.model_dump(), ) with tokenizer_info as orig_tokenizer, text_encoder_info as text_encoder: # , ExitStack() as stack: loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.clip.loras ] @@ -175,14 +178,14 @@ class ONNXTextToLatentsInvocation(BaseInvocation): description=FieldDescriptions.unet, input=Input.Connection, ) - control: Optional[Union[ControlField, list[ControlField]]] = InputField( + control: Union[ControlField, list[ControlField]] = InputField( default=None, description=FieldDescriptions.control, ) # seamless: bool = InputField(default=False, description="Whether or not to generate an image that can tile without seams", ) # seamless_axes: str = InputField(default="", description="The axes to tile the image on, 'x' and/or 'y'") - @validator("cfg_scale") + @field_validator("cfg_scale") def ge_one(cls, v): """validate that all 
cfg_scale values are >= 1""" if isinstance(v, list): @@ -241,7 +244,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation): stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, - node=self.dict(), + node=self.model_dump(), source_node_id=source_node_id, ) @@ -254,12 +257,15 @@ class ONNXTextToLatentsInvocation(BaseInvocation): eta=0.0, ) - unet_info = context.services.model_manager.get_model(**self.unet.unet.dict()) + unet_info = context.services.model_manager.get_model(**self.unet.unet.model_dump()) with unet_info as unet: # , ExitStack() as stack: # loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras] loras = [ - (context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) + ( + context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, + lora.weight, + ) for lora in self.unet.loras ] @@ -346,7 +352,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): raise Exception(f"Expected vae_decoder, found: {self.vae.vae.model_type}") vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), + **self.vae.vae.model_dump(), ) # clear memory as vae decode can request a lot @@ -375,7 +381,7 @@ class ONNXLatentsToImageInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, - metadata=self.metadata.dict() if self.metadata else None, + metadata=self.metadata.model_dump() if self.metadata else None, workflow=self.workflow, ) @@ -403,6 +409,8 @@ class OnnxModelField(BaseModel): base_model: BaseModelType = Field(description="Base model") model_type: ModelType = Field(description="Model Type") + model_config = ConfigDict(protected_namespaces=()) + @invocation("onnx_model_loader", title="ONNX Main Model", tags=["onnx", "model"], category="model", version="1.0.0") class 
OnnxModelLoaderInvocation(BaseInvocation): diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index 7c327a6657..0e86fb978b 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -44,13 +44,22 @@ from invokeai.app.invocations.primitives import FloatCollectionOutput from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation -@invocation("float_range", title="Float Range", tags=["math", "range"], category="math", version="1.0.0") +@invocation( + "float_range", + title="Float Range", + tags=["math", "range"], + category="math", + version="1.0.0", +) class FloatLinearRangeInvocation(BaseInvocation): """Creates a range""" start: float = InputField(default=5, description="The first value of the range") stop: float = InputField(default=10, description="The last value of the range") - steps: int = InputField(default=30, description="number of values to interpolate over (including start and stop)") + steps: int = InputField( + default=30, + description="number of values to interpolate over (including start and stop)", + ) def invoke(self, context: InvocationContext) -> FloatCollectionOutput: param_list = list(np.linspace(self.start, self.stop, self.steps)) @@ -95,7 +104,13 @@ EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))] # actually I think for now could just use CollectionOutput (which is list[Any] -@invocation("step_param_easing", title="Step Param Easing", tags=["step", "easing"], category="step", version="1.0.0") +@invocation( + "step_param_easing", + title="Step Param Easing", + tags=["step", "easing"], + category="step", + version="1.0.0", +) class StepParamEasingInvocation(BaseInvocation): """Experimental per-step parameter easing for denoising steps""" @@ -159,7 +174,9 @@ class StepParamEasingInvocation(BaseInvocation): context.services.logger.debug("base easing duration: " + str(base_easing_duration)) even_num_steps = 
num_easing_steps % 2 == 0 # even number of steps easing_function = easing_class( - start=self.start_value, end=self.end_value, duration=base_easing_duration - 1 + start=self.start_value, + end=self.end_value, + duration=base_easing_duration - 1, ) base_easing_vals = list() for step_index in range(base_easing_duration): @@ -199,7 +216,11 @@ class StepParamEasingInvocation(BaseInvocation): # else: # no mirroring (default) - easing_function = easing_class(start=self.start_value, end=self.end_value, duration=num_easing_steps - 1) + easing_function = easing_class( + start=self.start_value, + end=self.end_value, + duration=num_easing_steps - 1, + ) for step_index in range(num_easing_steps): step_val = easing_function.ease(step_index) easing_list.append(step_val) diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index b3d482b779..cb43a52447 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -3,7 +3,7 @@ from typing import Optional, Union import numpy as np from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator -from pydantic import validator +from pydantic import field_validator from invokeai.app.invocations.primitives import StringCollectionOutput @@ -21,7 +21,10 @@ from .baseinvocation import BaseInvocation, InputField, InvocationContext, UICom class DynamicPromptInvocation(BaseInvocation): """Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator""" - prompt: str = InputField(description="The prompt to parse with dynamicprompts", ui_component=UIComponent.Textarea) + prompt: str = InputField( + description="The prompt to parse with dynamicprompts", + ui_component=UIComponent.Textarea, + ) max_prompts: int = InputField(default=1, description="The number of prompts to generate") combinatorial: bool = InputField(default=False, description="Whether to use the combinatorial generator") @@ -36,21 +39,31 @@ class 
DynamicPromptInvocation(BaseInvocation): return StringCollectionOutput(collection=prompts) -@invocation("prompt_from_file", title="Prompts from File", tags=["prompt", "file"], category="prompt", version="1.0.0") +@invocation( + "prompt_from_file", + title="Prompts from File", + tags=["prompt", "file"], + category="prompt", + version="1.0.0", +) class PromptsFromFileInvocation(BaseInvocation): """Loads prompts from a text file""" file_path: str = InputField(description="Path to prompt text file") pre_prompt: Optional[str] = InputField( - default=None, description="String to prepend to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to prepend to each prompt", + ui_component=UIComponent.Textarea, ) post_prompt: Optional[str] = InputField( - default=None, description="String to append to each prompt", ui_component=UIComponent.Textarea + default=None, + description="String to append to each prompt", + ui_component=UIComponent.Textarea, ) start_line: int = InputField(default=1, ge=1, description="Line in the file to start start from") max_prompts: int = InputField(default=1, ge=0, description="Max lines to read from file (0=all)") - @validator("file_path") + @field_validator("file_path") def file_path_exists(cls, v): if not exists(v): raise ValueError(FileNotFoundError) @@ -79,6 +92,10 @@ class PromptsFromFileInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> StringCollectionOutput: prompts = self.promptsFromFile( - self.file_path, self.pre_prompt, self.post_prompt, self.start_line, self.max_prompts + self.file_path, + self.pre_prompt, + self.post_prompt, + self.start_line, + self.max_prompts, ) return StringCollectionOutput(collection=prompts) diff --git a/invokeai/app/invocations/t2i_adapter.py b/invokeai/app/invocations/t2i_adapter.py index e1bd8d0d04..76c250a552 100644 --- a/invokeai/app/invocations/t2i_adapter.py +++ b/invokeai/app/invocations/t2i_adapter.py @@ -1,6 +1,6 @@ from typing import Union 
-from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -23,6 +23,8 @@ class T2IAdapterModelField(BaseModel): model_name: str = Field(description="Name of the T2I-Adapter model") base_model: BaseModelType = Field(description="Base model") + model_config = ConfigDict(protected_namespaces=()) + class T2IAdapterField(BaseModel): image: ImageField = Field(description="The T2I-Adapter image prompt.") diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index e26c1b9084..d30bb71d95 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -7,6 +7,7 @@ import numpy as np import torch from basicsr.archs.rrdbnet_arch import RRDBNet from PIL import Image +from pydantic import ConfigDict from realesrgan import RealESRGANer from invokeai.app.invocations.primitives import ImageField, ImageOutput @@ -38,6 +39,8 @@ class ESRGANInvocation(BaseInvocation): default=400, ge=0, description="Tile size for tiled ESRGAN upscaling (0=tiling disabled)" ) + model_config = ConfigDict(protected_namespaces=()) + def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) models_path = context.services.configuration.models_path diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py index e0264dde0d..d08951b499 100644 --- a/invokeai/app/services/board_records/board_records_common.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Optional, Union -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, Field from invokeai.app.util.misc import get_iso_timestamp from invokeai.app.util.model_exclude_null import BaseModelExcludeNull @@ -18,9 +18,9 @@ class 
BoardRecord(BaseModelExcludeNull): """The created timestamp of the image.""" updated_at: Union[datetime, str] = Field(description="The updated timestamp of the board.") """The updated timestamp of the image.""" - deleted_at: Union[datetime, str, None] = Field(description="The deleted timestamp of the board.") + deleted_at: Optional[Union[datetime, str]] = Field(default=None, description="The deleted timestamp of the board.") """The updated timestamp of the image.""" - cover_image_name: Optional[str] = Field(description="The name of the cover image of the board.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the cover image of the board.") """The name of the cover image of the board.""" @@ -46,9 +46,9 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: ) -class BoardChanges(BaseModel, extra=Extra.forbid): - board_name: Optional[str] = Field(description="The board's new name.") - cover_image_name: Optional[str] = Field(description="The name of the board's new cover image.") +class BoardChanges(BaseModel, extra="forbid"): + board_name: Optional[str] = Field(default=None, description="The board's new name.") + cover_image_name: Optional[str] = Field(default=None, description="The name of the board's new cover image.") class BoardRecordNotFoundException(Exception): diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py index e22e1915fe..0cb54102bb 100644 --- a/invokeai/app/services/boards/boards_common.py +++ b/invokeai/app/services/boards/boards_common.py @@ -17,7 +17,7 @@ class BoardDTO(BoardRecord): def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO: """Converts a board record to a board DTO.""" return BoardDTO( - **board_record.dict(exclude={"cover_image_name"}), + **board_record.model_dump(exclude={"cover_image_name"}), cover_image_name=cover_image_name, image_count=image_count, ) diff --git 
a/invokeai/app/services/config/config_base.py b/invokeai/app/services/config/config_base.py index a07e14252a..9405c1dfae 100644 --- a/invokeai/app/services/config/config_base.py +++ b/invokeai/app/services/config/config_base.py @@ -18,7 +18,7 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints from omegaconf import DictConfig, ListConfig, OmegaConf -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str @@ -32,12 +32,14 @@ class InvokeAISettings(BaseSettings): initconf: ClassVar[Optional[DictConfig]] = None argparse_groups: ClassVar[Dict] = {} + model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True) + def parse_args(self, argv: Optional[list] = sys.argv[1:]): parser = self.get_parser() opt, unknown_opts = parser.parse_known_args(argv) if len(unknown_opts) > 0: print("Unknown args:", unknown_opts) - for name in self.__fields__: + for name in self.model_fields: if name not in self._excluded(): value = getattr(opt, name) if isinstance(value, ListConfig): @@ -54,10 +56,12 @@ class InvokeAISettings(BaseSettings): cls = self.__class__ type = get_args(get_type_hints(cls)["type"])[0] field_dict = dict({type: dict()}) - for name, field in self.__fields__.items(): + for name, field in self.model_fields.items(): if name in cls._excluded_from_yaml(): continue - category = field.field_info.extra.get("category") or "Uncategorized" + category = ( + field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized" + ) value = getattr(self, name) if category not in field_dict[type]: field_dict[type][category] = dict() @@ -73,7 +77,7 @@ class InvokeAISettings(BaseSettings): else: settings_stanza = "Uncategorized" - env_prefix = getattr(cls.Config, "env_prefix", None) + 
env_prefix = getattr(cls.model_config, "env_prefix", None) env_prefix = env_prefix if env_prefix is not None else settings_stanza.upper() initconf = ( @@ -89,14 +93,18 @@ class InvokeAISettings(BaseSettings): for key, value in os.environ.items(): upcase_environ[key.upper()] = value - fields = cls.__fields__ + fields = cls.model_fields cls.argparse_groups = {} for name, field in fields.items(): if name not in cls._excluded(): current_default = field.default - category = field.field_info.extra.get("category", "Uncategorized") + category = ( + field.json_schema_extra.get("category", "Uncategorized") + if field.json_schema_extra + else "Uncategorized" + ) env_name = env_prefix + "_" + name if category in initconf and name in initconf.get(category): field.default = initconf.get(category).get(name) @@ -146,11 +154,6 @@ class InvokeAISettings(BaseSettings): "tiled_decode", ] - class Config: - env_file_encoding = "utf-8" - arbitrary_types_allowed = True - case_sensitive = True - @classmethod def add_field_argument(cls, command_parser, name: str, field, default_override=None): field_type = get_type_hints(cls).get(name) @@ -161,7 +164,7 @@ class InvokeAISettings(BaseSettings): if field.default_factory is None else field.default_factory() ) - if category := field.field_info.extra.get("category"): + if category := (field.json_schema_extra.get("category", None) if field.json_schema_extra else None): if category not in cls.argparse_groups: cls.argparse_groups[category] = command_parser.add_argument_group(category) argparse_group = cls.argparse_groups[category] @@ -169,7 +172,7 @@ class InvokeAISettings(BaseSettings): argparse_group = command_parser if get_origin(field_type) == Literal: - allowed_values = get_args(field.type_) + allowed_values = get_args(field.annotation) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) @@ -182,7 +185,7 @@ class InvokeAISettings(BaseSettings): type=field_type, default=default, choices=allowed_values, - 
help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == Union: @@ -191,7 +194,7 @@ class InvokeAISettings(BaseSettings): dest=name, type=int_or_float_or_str, default=default, - help=field.field_info.description, + help=field.description, ) elif get_origin(field_type) == list: @@ -199,17 +202,17 @@ class InvokeAISettings(BaseSettings): f"--{name}", dest=name, nargs="*", - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) else: argparse_group.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation, default=default, - action=argparse.BooleanOptionalAction if field.type_ == bool else "store", - help=field.field_info.description, + action=argparse.BooleanOptionalAction if field.annotation == bool else "store", + help=field.description, ) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index 2a42c99bd8..df01b65882 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -144,8 +144,8 @@ which is set to the desired top-level name. 
For example, to create a class InvokeBatch(InvokeAISettings): type: Literal["InvokeBatch"] = "InvokeBatch" - node_count : int = Field(default=1, description="Number of nodes to run on", category='Resources') - cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", category='Resources') + node_count : int = Field(default=1, description="Number of nodes to run on", json_schema_extra=dict(category='Resources')) + cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", json_schema_extra=dict(category='Resources')) This will now read and write from the "InvokeBatch" section of the config file, look for environment variables named INVOKEBATCH_*, and @@ -175,7 +175,8 @@ from pathlib import Path from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hints from omegaconf import DictConfig, OmegaConf -from pydantic import Field, parse_obj_as +from pydantic import Field, TypeAdapter +from pydantic_settings import SettingsConfigDict from .config_base import InvokeAISettings @@ -185,6 +186,21 @@ LEGACY_INIT_FILE = Path("invokeai.init") DEFAULT_MAX_VRAM = 0.5 +class Categories(object): + WebServer = dict(category="Web Server") + Features = dict(category="Features") + Paths = dict(category="Paths") + Logging = dict(category="Logging") + Development = dict(category="Development") + Other = dict(category="Other") + ModelCache = dict(category="Model Cache") + Device = dict(category="Device") + Generation = dict(category="Generation") + Queue = dict(category="Queue") + Nodes = dict(category="Nodes") + MemoryPerformance = dict(category="Memory/Performance") + + class InvokeAIAppConfig(InvokeAISettings): """ Generate images using Stable Diffusion. 
Use "invokeai" to launch @@ -201,86 +217,88 @@ class InvokeAIAppConfig(InvokeAISettings): type: Literal["InvokeAI"] = "InvokeAI" # WEB - host : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server') - port : int = Field(default=9090, description="Port to bind to", category='Web Server') - allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", category='Web Server') - allow_credentials : bool = Field(default=True, description="Allow CORS credentials", category='Web Server') - allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", category='Web Server') - allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", category='Web Server') + host : str = Field(default="127.0.0.1", description="IP address to bind to", json_schema_extra=Categories.WebServer) + port : int = Field(default=9090, description="Port to bind to", json_schema_extra=Categories.WebServer) + allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", json_schema_extra=Categories.WebServer) + allow_credentials : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer) + allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer) + allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer) # FEATURES - esrgan : bool = Field(default=True, description="Enable/disable upscaling code", category='Features') - internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features') - log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features') - patchmatch : bool = Field(default=True, description="Enable/disable patchmatch 
inpaint code", category='Features') - ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', category='Features') + esrgan : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features) + internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", json_schema_extra=Categories.Features) + log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", json_schema_extra=Categories.Features) + patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", json_schema_extra=Categories.Features) + ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', json_schema_extra=Categories.Features) # PATHS - root : Path = Field(default=None, description='InvokeAI runtime root directory', category='Paths') - autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') - lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths') - embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths') - controlnet_dir : Path = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths') - conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths') - models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths') - legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', 
category='Paths') - db_dir : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths') - outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths') - use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths') - from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths') + root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths) + autoimport_dir : Optional[Path] = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths) + lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths) + embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths) + controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths) + conf_path : Optional[Path] = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths) + models_dir : Optional[Path] = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths) + legacy_conf_dir : Optional[Path] = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths) + db_dir : Optional[Path] = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths) + 
outdir : Optional[Path] = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths) + use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths) + from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths) # LOGGING - log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', category="Logging") + log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http="', json_schema_extra=Categories.Logging) # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues - log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging") - log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging") - log_sql : bool = Field(default=False, description="Log SQL queries", category="Logging") + log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. 
Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', json_schema_extra=Categories.Logging) + log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging) + log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging) - dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", category="Development") + dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development) - version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other") + version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other) # CACHE - ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", category="Model Cache", ) - vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", category="Model Cache", ) - lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", ) + ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) # DEVICE - device : Literal["auto", "cpu", 
"cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", category="Device", ) - precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", category="Device", ) + device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) + precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device) # GENERATION - sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category="Generation", ) - attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", category="Generation", ) - attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", ) - force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",) - force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",) - png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 
0 = fastest, largest filesize, 9 = slowest, smallest filesize", category="Generation", ) + sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation) + attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation) + attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation) + force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation) + png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation) # QUEUE - max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", category="Queue", ) + max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue) # NODES - allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", category="Nodes") - deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", category="Nodes") - node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", category="Nodes", ) + allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. 
Omit to allow all.", json_schema_extra=Categories.Nodes) + deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes) + node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes) # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES - always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance') - free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", category='Memory/Performance') - max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance') - max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance') - xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance') - tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance') + always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance) + free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance) + max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance) + max_vram_cache_size : Optional[float] = Field(default=None, ge=0, 
description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.MemoryPerformance) + xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.MemoryPerformance) + tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.MemoryPerformance) # See InvokeAIAppConfig subclass below for CACHE and DEVICE categories # fmt: on - class Config: - validate_assignment = True - env_prefix = "INVOKEAI" + model_config = SettingsConfigDict(validate_assignment=True, env_prefix="INVOKEAI") - def parse_args(self, argv: Optional[list[str]] = None, conf: Optional[DictConfig] = None, clobber=False): + def parse_args( + self, + argv: Optional[list[str]] = None, + conf: Optional[DictConfig] = None, + clobber=False, + ): """ Update settings with contents of init file, environment, and command-line settings. 
@@ -308,7 +326,11 @@ class InvokeAIAppConfig(InvokeAISettings): if self.singleton_init and not clobber: hints = get_type_hints(self.__class__) for k in self.singleton_init: - setattr(self, k, parse_obj_as(hints[k], self.singleton_init[k])) + setattr( + self, + k, + TypeAdapter(hints[k]).validate_python(self.singleton_init[k]), + ) @classmethod def get_config(cls, **kwargs) -> InvokeAIAppConfig: diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index 8685db3717..ad00815151 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -2,7 +2,6 @@ from typing import Any, Optional -from invokeai.app.invocations.model import ModelInfo from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage from invokeai.app.services.session_queue.session_queue_common import ( BatchStatus, @@ -11,6 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueStatus, ) from invokeai.app.util.misc import get_timestamp +from invokeai.backend.model_management.model_manager import ModelInfo from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType @@ -55,7 +55,7 @@ class EventServiceBase: graph_execution_state_id=graph_execution_state_id, node_id=node.get("id"), source_node_id=source_node_id, - progress_image=progress_image.dict() if progress_image is not None else None, + progress_image=progress_image.model_dump() if progress_image is not None else None, step=step, order=order, total_steps=total_steps, @@ -291,8 +291,8 @@ class EventServiceBase: started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None, completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None, ), - batch_status=batch_status.dict(), - queue_status=queue_status.dict(), + batch_status=batch_status.model_dump(), + 
queue_status=queue_status.model_dump(), ), ) diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py index d998f9024b..5dde7b05d6 100644 --- a/invokeai/app/services/image_files/image_files_base.py +++ b/invokeai/app/services/image_files/image_files_base.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +from pathlib import Path from typing import Optional from PIL.Image import Image as PILImageType @@ -13,7 +14,7 @@ class ImageFileStorageBase(ABC): pass @abstractmethod - def get_path(self, image_name: str, thumbnail: bool = False) -> str: + def get_path(self, image_name: str, thumbnail: bool = False) -> Path: """Gets the internal path to an image or thumbnail.""" pass diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 58db6feb23..107ff85f9b 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -34,8 +34,8 @@ class ImageRecordStorageBase(ABC): @abstractmethod def get_many( self, - offset: Optional[int] = None, - limit: Optional[int] = None, + offset: int = 0, + limit: int = 10, image_origin: Optional[ResourceOrigin] = None, categories: Optional[list[ImageCategory]] = None, is_intermediate: Optional[bool] = None, @@ -69,11 +69,11 @@ class ImageRecordStorageBase(ABC): image_category: ImageCategory, width: int, height: int, - session_id: Optional[str], - node_id: Optional[str], - metadata: Optional[dict], - is_intermediate: bool = False, - starred: bool = False, + is_intermediate: Optional[bool] = False, + starred: Optional[bool] = False, + session_id: Optional[str] = None, + node_id: Optional[str] = None, + metadata: Optional[dict] = None, ) -> datetime: """Saves an image record.""" pass diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py index 
39fac92048..5a6e5652c9 100644 --- a/invokeai/app/services/image_records/image_records_common.py +++ b/invokeai/app/services/image_records/image_records_common.py @@ -3,7 +3,7 @@ import datetime from enum import Enum from typing import Optional, Union -from pydantic import Extra, Field, StrictBool, StrictStr +from pydantic import Field, StrictBool, StrictStr from invokeai.app.util.metaenum import MetaEnum from invokeai.app.util.misc import get_iso_timestamp @@ -129,7 +129,9 @@ class ImageRecord(BaseModelExcludeNull): """The created timestamp of the image.""" updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the image.") """The updated timestamp of the image.""" - deleted_at: Union[datetime.datetime, str, None] = Field(description="The deleted timestamp of the image.") + deleted_at: Optional[Union[datetime.datetime, str]] = Field( + default=None, description="The deleted timestamp of the image." + ) """The deleted timestamp of the image.""" is_intermediate: bool = Field(description="Whether this is an intermediate image.") """Whether this is an intermediate image.""" @@ -147,7 +149,7 @@ class ImageRecord(BaseModelExcludeNull): """Whether this image is starred.""" -class ImageRecordChanges(BaseModelExcludeNull, extra=Extra.forbid): +class ImageRecordChanges(BaseModelExcludeNull, extra="allow"): """A set of changes to apply to an image record. 
Only limited changes are valid: diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 864f4eff00..9793236d9c 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -2,7 +2,7 @@ import json import sqlite3 import threading from datetime import datetime -from typing import Optional, cast +from typing import Optional, Union, cast from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -117,7 +117,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ ) - def get(self, image_name: str) -> Optional[ImageRecord]: + def get(self, image_name: str) -> ImageRecord: try: self._lock.acquire() @@ -223,8 +223,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): def get_many( self, - offset: Optional[int] = None, - limit: Optional[int] = None, + offset: int = 0, + limit: int = 10, image_origin: Optional[ResourceOrigin] = None, categories: Optional[list[ImageCategory]] = None, is_intermediate: Optional[bool] = None, @@ -249,7 +249,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): """ query_conditions = "" - query_params = [] + query_params: list[Union[int, str, bool]] = [] if image_origin is not None: query_conditions += """--sql @@ -387,13 +387,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase): image_name: str, image_origin: ResourceOrigin, image_category: ImageCategory, - session_id: Optional[str], width: int, height: int, - node_id: Optional[str], - metadata: Optional[dict], - is_intermediate: bool = False, - starred: bool = False, + is_intermediate: Optional[bool] = False, + starred: Optional[bool] = False, + session_id: Optional[str] = None, + node_id: Optional[str] = None, + metadata: Optional[dict] = None, ) -> datetime: try: metadata_json = None if metadata is None else 
json.dumps(metadata) diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index 71581099a3..a611e9485d 100644 --- a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -49,7 +49,7 @@ class ImageServiceABC(ABC): node_id: Optional[str] = None, session_id: Optional[str] = None, board_id: Optional[str] = None, - is_intermediate: bool = False, + is_intermediate: Optional[bool] = False, metadata: Optional[dict] = None, workflow: Optional[str] = None, ) -> ImageDTO: diff --git a/invokeai/app/services/images/images_common.py b/invokeai/app/services/images/images_common.py index f8b63a16c1..325cecdd26 100644 --- a/invokeai/app/services/images/images_common.py +++ b/invokeai/app/services/images/images_common.py @@ -20,7 +20,9 @@ class ImageUrlsDTO(BaseModelExcludeNull): class ImageDTO(ImageRecord, ImageUrlsDTO): """Deserialized image record, enriched for the frontend.""" - board_id: Optional[str] = Field(description="The id of the board the image belongs to, if one exists.") + board_id: Optional[str] = Field( + default=None, description="The id of the board the image belongs to, if one exists." 
+ ) """The id of the board the image belongs to, if one exists.""" pass @@ -34,7 +36,7 @@ def image_record_to_dto( ) -> ImageDTO: """Converts an image record to an image DTO.""" return ImageDTO( - **image_record.dict(), + **image_record.model_dump(), image_url=image_url, thumbnail_url=thumbnail_url, board_id=board_id, diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index 9134b9a4f6..d4e473b8e4 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -41,7 +41,7 @@ class ImageService(ImageServiceABC): node_id: Optional[str] = None, session_id: Optional[str] = None, board_id: Optional[str] = None, - is_intermediate: bool = False, + is_intermediate: Optional[bool] = False, metadata: Optional[dict] = None, workflow: Optional[str] = None, ) -> ImageDTO: @@ -146,7 +146,7 @@ class ImageService(ImageServiceABC): self.__invoker.services.logger.error("Problem getting image DTO") raise e - def get_metadata(self, image_name: str) -> Optional[ImageMetadata]: + def get_metadata(self, image_name: str) -> ImageMetadata: try: image_record = self.__invoker.services.image_records.get(image_name) metadata = self.__invoker.services.image_records.get_metadata(image_name) @@ -174,7 +174,7 @@ class ImageService(ImageServiceABC): def get_path(self, image_name: str, thumbnail: bool = False) -> str: try: - return self.__invoker.services.image_files.get_path(image_name, thumbnail) + return str(self.__invoker.services.image_files.get_path(image_name, thumbnail)) except Exception as e: self.__invoker.services.logger.error("Problem getting image path") raise e diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py index 817dbb958e..4a503b3c6b 100644 --- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py +++ 
b/invokeai/app/services/invocation_cache/invocation_cache_memory.py @@ -58,7 +58,12 @@ class MemoryInvocationCache(InvocationCacheBase): # If the cache is full, we need to remove the least used number_to_delete = len(self._cache) + 1 - self._max_cache_size self._delete_oldest_access(number_to_delete) - self._cache[key] = CachedItem(invocation_output, invocation_output.json()) + self._cache[key] = CachedItem( + invocation_output, + invocation_output.model_dump_json( + warnings=False, exclude_defaults=True, exclude_unset=True, include={"type"} + ), + ) def _delete_oldest_access(self, number_to_delete: int) -> None: number_to_delete = min(number_to_delete, len(self._cache)) @@ -85,7 +90,7 @@ class MemoryInvocationCache(InvocationCacheBase): @staticmethod def create_key(invocation: BaseInvocation) -> int: - return hash(invocation.json(exclude={"id"})) + return hash(invocation.model_dump_json(exclude={"id"}, warnings=False)) def disable(self) -> None: with self._lock: diff --git a/invokeai/app/services/invocation_processor/invocation_processor_default.py b/invokeai/app/services/invocation_processor/invocation_processor_default.py index 349c4a03e4..c59fb678ef 100644 --- a/invokeai/app/services/invocation_processor/invocation_processor_default.py +++ b/invokeai/app/services/invocation_processor/invocation_processor_default.py @@ -89,7 +89,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, ) @@ -127,9 +127,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, - result=outputs.dict(), + 
result=outputs.model_dump(), ) self.__invoker.services.performance_statistics.log_stats() @@ -157,7 +157,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, error_type=e.__class__.__name__, error=error, @@ -187,7 +187,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC): queue_item_id=queue_item.session_queue_item_id, queue_id=queue_item.session_queue_id, graph_execution_state_id=graph_execution_state.id, - node=invocation.dict(), + node=invocation.model_dump(), source_node_id=source_node_id, error_type=e.__class__.__name__, error=traceback.format_exc(), diff --git a/invokeai/app/services/invocation_stats/invocation_stats_default.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py index 2041ab6190..be019b6820 100644 --- a/invokeai/app/services/invocation_stats/invocation_stats_default.py +++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py @@ -72,7 +72,7 @@ class InvocationStatsService(InvocationStatsServiceBase): ) self.collector.update_invocation_stats( graph_id=self.graph_id, - invocation_type=self.invocation.type, # type: ignore - `type` is not on the `BaseInvocation` model, but *is* on all invocations + invocation_type=self.invocation.type, # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations time_used=time.time() - self.start_time, vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0, ) diff --git a/invokeai/app/services/item_storage/item_storage_sqlite.py b/invokeai/app/services/item_storage/item_storage_sqlite.py index 1d6008e90f..1bb9429130 100644 --- a/invokeai/app/services/item_storage/item_storage_sqlite.py +++ b/invokeai/app/services/item_storage/item_storage_sqlite.py @@ -2,7 +2,7 @@ import sqlite3 
import threading from typing import Generic, Optional, TypeVar, get_args -from pydantic import BaseModel, parse_raw_as +from pydantic import BaseModel, TypeAdapter from invokeai.app.services.shared.pagination import PaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -18,6 +18,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): _cursor: sqlite3.Cursor _id_field: str _lock: threading.RLock + _adapter: Optional[TypeAdapter[T]] def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"): super().__init__() @@ -27,6 +28,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._table_name = table_name self._id_field = id_field # TODO: validate that T has this field self._cursor = self._conn.cursor() + self._adapter: Optional[TypeAdapter[T]] = None self._create_table() @@ -45,16 +47,21 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._lock.release() def _parse_item(self, item: str) -> T: - # __orig_class__ is technically an implementation detail of the typing module, not a supported API - item_type = get_args(self.__orig_class__)[0] # type: ignore - return parse_raw_as(item_type, item) + if self._adapter is None: + """ + We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so + we can create it when it is first needed instead. 
+ __orig_class__ is technically an implementation detail of the typing module, not a supported API + """ + self._adapter = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined] + return self._adapter.validate_json(item) def set(self, item: T): try: self._lock.acquire() self._cursor.execute( f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""", - (item.json(),), + (item.model_dump_json(warnings=False, exclude_none=True),), ) self._conn.commit() finally: diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py index bb9110ba0a..4c2fc4c085 100644 --- a/invokeai/app/services/model_manager/model_manager_base.py +++ b/invokeai/app/services/model_manager/model_manager_base.py @@ -231,7 +231,7 @@ class ModelManagerServiceBase(ABC): def merge_models( self, model_names: List[str] = Field( - default=None, min_items=2, max_items=3, description="List of model names to merge" + default=None, min_length=2, max_length=3, description="List of model names to merge" ), base_model: Union[BaseModelType, str] = Field( default=None, description="Base model shared by all models to be merged" diff --git a/invokeai/app/services/model_manager/model_manager_default.py b/invokeai/app/services/model_manager/model_manager_default.py index 263f804b4d..cdb3e59a91 100644 --- a/invokeai/app/services/model_manager/model_manager_default.py +++ b/invokeai/app/services/model_manager/model_manager_default.py @@ -327,7 +327,7 @@ class ModelManagerService(ModelManagerServiceBase): def merge_models( self, model_names: List[str] = Field( - default=None, min_items=2, max_items=3, description="List of model names to merge" + default=None, min_length=2, max_length=3, description="List of model names to merge" ), base_model: Union[BaseModelType, str] = Field( default=None, description="Base model shared by all models to be merged" diff --git a/invokeai/app/services/session_queue/session_queue_common.py 
b/invokeai/app/services/session_queue/session_queue_common.py index 2d40a5b0c4..48e1da83b5 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -3,8 +3,8 @@ import json from itertools import chain, product from typing import Generator, Iterable, Literal, NamedTuple, Optional, TypeAlias, Union, cast -from pydantic import BaseModel, Field, StrictStr, parse_raw_as, root_validator, validator -from pydantic.json import pydantic_encoder +from pydantic import BaseModel, ConfigDict, Field, StrictStr, TypeAdapter, field_validator, model_validator +from pydantic_core import to_jsonable_python from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError @@ -17,7 +17,7 @@ class BatchZippedLengthError(ValueError): """Raise when a batch has items of different lengths.""" -class BatchItemsTypeError(TypeError): +class BatchItemsTypeError(ValueError): # this cannot be a TypeError in pydantic v2 """Raise when a batch has items of different types.""" @@ -70,7 +70,7 @@ class Batch(BaseModel): default=1, ge=1, description="Int stating how many times to iterate through all possible batch indices" ) - @validator("data") + @field_validator("data") def validate_lengths(cls, v: Optional[BatchDataCollection]): if v is None: return v @@ -81,7 +81,7 @@ class Batch(BaseModel): raise BatchZippedLengthError("Zipped batch items must all have the same length") return v - @validator("data") + @field_validator("data") def validate_types(cls, v: Optional[BatchDataCollection]): if v is None: return v @@ -94,7 +94,7 @@ class Batch(BaseModel): raise BatchItemsTypeError("All items in a batch must have the same type") return v - @validator("data") + @field_validator("data") def validate_unique_field_mappings(cls, v: Optional[BatchDataCollection]): if v is None: return v @@ -107,34 +107,35 @@ class Batch(BaseModel): 
paths.add(pair) return v - @root_validator(skip_on_failure=True) + @model_validator(mode="after") def validate_batch_nodes_and_edges(cls, values): - batch_data_collection = cast(Optional[BatchDataCollection], values["data"]) + batch_data_collection = cast(Optional[BatchDataCollection], values.data) if batch_data_collection is None: return values - graph = cast(Graph, values["graph"]) + graph = cast(Graph, values.graph) for batch_data_list in batch_data_collection: for batch_data in batch_data_list: try: node = cast(BaseInvocation, graph.get_node(batch_data.node_path)) except NodeNotFoundError: raise NodeNotFoundError(f"Node {batch_data.node_path} not found in graph") - if batch_data.field_name not in node.__fields__: + if batch_data.field_name not in node.model_fields: raise NodeNotFoundError(f"Field {batch_data.field_name} not found in node {batch_data.node_path}") return values - @validator("graph") + @field_validator("graph") def validate_graph(cls, v: Graph): v.validate_self() return v - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "graph", "runs", ] - } + ) + ) # endregion Batch @@ -146,15 +147,21 @@ DEFAULT_QUEUE_ID = "default" QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"] +adapter_NodeFieldValue = TypeAdapter(list[NodeFieldValue]) + def get_field_values(queue_item_dict: dict) -> Optional[list[NodeFieldValue]]: field_values_raw = queue_item_dict.get("field_values", None) - return parse_raw_as(list[NodeFieldValue], field_values_raw) if field_values_raw is not None else None + return adapter_NodeFieldValue.validate_json(field_values_raw) if field_values_raw is not None else None + + +adapter_GraphExecutionState = TypeAdapter(GraphExecutionState) def get_session(queue_item_dict: dict) -> GraphExecutionState: session_raw = queue_item_dict.get("session", "{}") - return parse_raw_as(GraphExecutionState, session_raw) + session = 
adapter_GraphExecutionState.validate_json(session_raw, strict=False) + return session class SessionQueueItemWithoutGraph(BaseModel): @@ -178,14 +185,14 @@ class SessionQueueItemWithoutGraph(BaseModel): ) @classmethod - def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO": + def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO": # must parse these manually queue_item_dict["field_values"] = get_field_values(queue_item_dict) return SessionQueueItemDTO(**queue_item_dict) - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "item_id", "status", "batch_id", @@ -196,7 +203,8 @@ class SessionQueueItemWithoutGraph(BaseModel): "created_at", "updated_at", ] - } + ) + ) class SessionQueueItemDTO(SessionQueueItemWithoutGraph): @@ -207,15 +215,15 @@ class SessionQueueItem(SessionQueueItemWithoutGraph): session: GraphExecutionState = Field(description="The fully-populated session to be executed") @classmethod - def from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem": + def queue_item_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItem": # must parse these manually queue_item_dict["field_values"] = get_field_values(queue_item_dict) queue_item_dict["session"] = get_session(queue_item_dict) return SessionQueueItem(**queue_item_dict) - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "item_id", "status", "batch_id", @@ -227,7 +235,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph): "created_at", "updated_at", ] - } + ) + ) # endregion Queue Items @@ -321,7 +330,7 @@ def populate_graph(graph: Graph, node_field_values: Iterable[NodeFieldValue]) -> """ Populates the given graph with the given batch data items. 
""" - graph_clone = graph.copy(deep=True) + graph_clone = graph.model_copy(deep=True) for item in node_field_values: node = graph_clone.get_node(item.node_path) if node is None: @@ -354,7 +363,7 @@ def create_session_nfv_tuples( for item in batch_datum.items ] node_field_values_to_zip.append(node_field_values) - data.append(list(zip(*node_field_values_to_zip))) + data.append(list(zip(*node_field_values_to_zip))) # type: ignore [arg-type] # create generator to yield session,nfv tuples count = 0 @@ -409,11 +418,11 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new values_to_insert.append( SessionQueueValueToInsert( queue_id, # queue_id - session.json(), # session (json) + session.model_dump_json(warnings=False, exclude_none=True), # session (json) session.id, # session_id batch.batch_id, # batch_id # must use pydantic_encoder bc field_values is a list of models - json.dumps(field_values, default=pydantic_encoder) if field_values else None, # field_values (json) + json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json) priority, # priority ) ) @@ -421,3 +430,6 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new # endregion Util + +Batch.model_rebuild(force=True) +SessionQueueItem.model_rebuild(force=True) diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index eb82667be5..4daab9cdbc 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -277,8 +277,8 @@ class SqliteSessionQueue(SessionQueueBase): if result is None: raise SessionQueueItemNotFoundError(f"No queue item with batch id {enqueue_result.batch.batch_id}") return EnqueueGraphResult( - **enqueue_result.dict(), - queue_item=SessionQueueItemDTO.from_dict(dict(result)), + **enqueue_result.model_dump(), + 
queue_item=SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)), ) def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: @@ -351,7 +351,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - queue_item = SessionQueueItem.from_dict(dict(result)) + queue_item = SessionQueueItem.queue_item_from_dict(dict(result)) queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="in_progress") return queue_item @@ -380,7 +380,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - return SessionQueueItem.from_dict(dict(result)) + return SessionQueueItem.queue_item_from_dict(dict(result)) def get_current(self, queue_id: str) -> Optional[SessionQueueItem]: try: @@ -404,7 +404,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: return None - return SessionQueueItem.from_dict(dict(result)) + return SessionQueueItem.queue_item_from_dict(dict(result)) def _set_queue_item_status( self, item_id: int, status: QUEUE_ITEM_STATUS, error: Optional[str] = None @@ -564,7 +564,7 @@ class SqliteSessionQueue(SessionQueueBase): queue_item = self.get_queue_item(item_id) if queue_item.status not in ["canceled", "failed", "completed"]: status = "failed" if error is not None else "canceled" - queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) + queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here self.__invoker.services.queue.cancel(queue_item.session_id) self.__invoker.services.events.emit_session_canceled( queue_item_id=queue_item.item_id, @@ -699,7 +699,7 @@ class SqliteSessionQueue(SessionQueueBase): self.__lock.release() if result is None: raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}") - return SessionQueueItem.from_dict(dict(result)) + 
return SessionQueueItem.queue_item_from_dict(dict(result)) def list_queue_items( self, @@ -751,7 +751,7 @@ class SqliteSessionQueue(SessionQueueBase): params.append(limit + 1) self.__cursor.execute(query, params) results = cast(list[sqlite3.Row], self.__cursor.fetchall()) - items = [SessionQueueItemDTO.from_dict(dict(result)) for result in results] + items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results] has_more = False if len(items) > limit: # remove the extra item diff --git a/invokeai/app/services/shared/default_graphs.py b/invokeai/app/services/shared/default_graphs.py index b2d0a1f0b6..9a6e2456cb 100644 --- a/invokeai/app/services/shared/default_graphs.py +++ b/invokeai/app/services/shared/default_graphs.py @@ -80,10 +80,10 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes graphs: list[LibraryGraph] = list() - # text_to_image = graph_library.get(default_text_to_image_graph_id) + text_to_image = graph_library.get(default_text_to_image_graph_id) - # # TODO: Check if the graph is the same as the default one, and if not, update it - # #if text_to_image is None: + # TODO: Check if the graph is the same as the default one, and if not, update it + # if text_to_image is None: text_to_image = create_text_to_image() graph_library.set(text_to_image) diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index dab045af9d..8f974f7c6b 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -5,7 +5,7 @@ import itertools from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints import networkx as nx -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, ConfigDict, field_validator, model_validator from pydantic.fields import Field # Importing * is bad karma but needed here for 
node detection @@ -235,7 +235,8 @@ class CollectInvocationOutput(BaseInvocationOutput): class CollectInvocation(BaseInvocation): """Collects values into a collection""" - item: Any = InputField( + item: Optional[Any] = InputField( + default=None, description="The item to collect (all inputs must be of the same type)", ui_type=UIType.CollectionItem, title="Collection Item", @@ -250,8 +251,8 @@ class CollectInvocation(BaseInvocation): return CollectInvocationOutput(collection=copy.copy(self.collection)) -InvocationsUnion = Union[BaseInvocation.get_invocations()] # type: ignore -InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()] # type: ignore +InvocationsUnion: Any = BaseInvocation.get_invocations_union() +InvocationOutputsUnion: Any = BaseInvocationOutput.get_outputs_union() class Graph(BaseModel): @@ -378,13 +379,13 @@ class Graph(BaseModel): raise NodeNotFoundError(f"Edge destination node {edge.destination.node_id} does not exist in the graph") # output fields are not on the node object directly, they are on the output type - if edge.source.field not in source_node.get_output_type().__fields__: + if edge.source.field not in source_node.get_output_type().model_fields: raise NodeFieldNotFoundError( f"Edge source field {edge.source.field} does not exist in node {edge.source.node_id}" ) # input fields are on the node - if edge.destination.field not in destination_node.__fields__: + if edge.destination.field not in destination_node.model_fields: raise NodeFieldNotFoundError( f"Edge destination field {edge.destination.field} does not exist in node {edge.destination.node_id}" ) @@ -395,24 +396,24 @@ class Graph(BaseModel): raise CyclicalGraphError("Graph contains cycles") # Validate all edge connections are valid - for e in self.edges: + for edge in self.edges: if not are_connections_compatible( - self.get_node(e.source.node_id), - e.source.field, - self.get_node(e.destination.node_id), - e.destination.field, + 
self.get_node(edge.source.node_id), + edge.source.field, + self.get_node(edge.destination.node_id), + edge.destination.field, ): raise InvalidEdgeError( - f"Invalid edge from {e.source.node_id}.{e.source.field} to {e.destination.node_id}.{e.destination.field}" + f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" ) # Validate all iterators & collectors # TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available - for n in self.nodes.values(): - if isinstance(n, IterateInvocation) and not self._is_iterator_connection_valid(n.id): - raise InvalidEdgeError(f"Invalid iterator node {n.id}") - if isinstance(n, CollectInvocation) and not self._is_collector_connection_valid(n.id): - raise InvalidEdgeError(f"Invalid collector node {n.id}") + for node in self.nodes.values(): + if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id): + raise InvalidEdgeError(f"Invalid iterator node {node.id}") + if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id): + raise InvalidEdgeError(f"Invalid collector node {node.id}") return None @@ -594,7 +595,7 @@ class Graph(BaseModel): def _get_input_edges_and_graphs( self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", str, Edge]]: + ) -> list[tuple["Graph", Union[str, None], Edge]]: """Gets all input edges for a node along with the graph they are in and the graph's path""" edges = list() @@ -636,7 +637,7 @@ class Graph(BaseModel): def _get_output_edges_and_graphs( self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", str, Edge]]: + ) -> list[tuple["Graph", Union[str, None], Edge]]: """Gets all output edges for a node along with the graph they are in and the graph's path""" edges = list() @@ -817,15 +818,15 @@ class GraphExecutionState(BaseModel): default_factory=dict, ) - @validator("graph") + 
@field_validator("graph") def graph_is_valid(cls, v: Graph): """Validates that the graph is valid""" v.validate_self() return v - class Config: - schema_extra = { - "required": [ + model_config = ConfigDict( + json_schema_extra=dict( + required=[ "id", "graph", "execution_graph", @@ -836,7 +837,8 @@ class GraphExecutionState(BaseModel): "prepared_source_mapping", "source_prepared_mapping", ] - } + ) + ) def next(self) -> Optional[BaseInvocation]: """Gets the next node ready to execute.""" @@ -910,7 +912,7 @@ class GraphExecutionState(BaseModel): input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field) self_iteration_count = len(input_collection) - new_nodes = list() + new_nodes: list[str] = list() if self_iteration_count == 0: # TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid. return new_nodes @@ -920,7 +922,7 @@ class GraphExecutionState(BaseModel): # Create new edges for this iteration # For collect nodes, this may contain multiple inputs to the same field - new_edges = list() + new_edges: list[Edge] = list() for edge in input_edges: for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id): new_edge = Edge( @@ -1179,18 +1181,18 @@ class LibraryGraph(BaseModel): description="The outputs exposed by this graph", default_factory=list ) - @validator("exposed_inputs", "exposed_outputs") - def validate_exposed_aliases(cls, v): + @field_validator("exposed_inputs", "exposed_outputs") + def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]): if len(v) != len(set(i.alias for i in v)): raise ValueError("Duplicate exposed alias") return v - @root_validator + @model_validator(mode="after") def validate_exposed_nodes(cls, values): - graph = values["graph"] + graph = values.graph # Validate exposed inputs - for exposed_input in values["exposed_inputs"]: + for exposed_input in values.exposed_inputs: if not 
graph.has_node(exposed_input.node_path): raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist") node = graph.get_node(exposed_input.node_path) @@ -1200,7 +1202,7 @@ class LibraryGraph(BaseModel): ) # Validate exposed outputs - for exposed_output in values["exposed_outputs"]: + for exposed_output in values.exposed_outputs: if not graph.has_node(exposed_output.node_path): raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist") node = graph.get_node(exposed_output.node_path) @@ -1212,4 +1214,6 @@ class LibraryGraph(BaseModel): return values -GraphInvocation.update_forward_refs() +GraphInvocation.model_rebuild(force=True) +Graph.model_rebuild(force=True) +GraphExecutionState.model_rebuild(force=True) diff --git a/invokeai/app/services/shared/pagination.py b/invokeai/app/services/shared/pagination.py index 85c8fb984e..ea342b1101 100644 --- a/invokeai/app/services/shared/pagination.py +++ b/invokeai/app/services/shared/pagination.py @@ -1,12 +1,11 @@ from typing import Generic, TypeVar from pydantic import BaseModel, Field -from pydantic.generics import GenericModel GenericBaseModel = TypeVar("GenericBaseModel", bound=BaseModel) -class CursorPaginatedResults(GenericModel, Generic[GenericBaseModel]): +class CursorPaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Cursor-paginated results Generic must be a Pydantic model @@ -17,7 +16,7 @@ class CursorPaginatedResults(GenericModel, Generic[GenericBaseModel]): items: list[GenericBaseModel] = Field(..., description="Items") -class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]): +class OffsetPaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Offset-paginated results Generic must be a Pydantic model @@ -29,7 +28,7 @@ class OffsetPaginatedResults(GenericModel, Generic[GenericBaseModel]): items: list[GenericBaseModel] = Field(description="Items") -class PaginatedResults(GenericModel, Generic[GenericBaseModel]): +class 
PaginatedResults(BaseModel, Generic[GenericBaseModel]): """ Paginated results Generic must be a Pydantic model diff --git a/invokeai/app/util/controlnet_utils.py b/invokeai/app/util/controlnet_utils.py index e6f34a4c44..51ceec2edd 100644 --- a/invokeai/app/util/controlnet_utils.py +++ b/invokeai/app/util/controlnet_utils.py @@ -265,7 +265,7 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: def prepare_control_image( - image: Image, + image: Image.Image, width: int, height: int, num_channels: int = 3, diff --git a/invokeai/app/util/misc.py b/invokeai/app/util/misc.py index 6d56652ed4..910b05d8dd 100644 --- a/invokeai/app/util/misc.py +++ b/invokeai/app/util/misc.py @@ -1,4 +1,5 @@ import datetime +import typing import uuid import numpy as np @@ -27,3 +28,8 @@ def get_random_seed(): def uuid_string(): res = uuid.uuid4() return str(res) + + +def is_optional(value: typing.Any): + """Checks if a value is typed as Optional. Note that Optional is sugar for Union[x, None].""" + return typing.get_origin(value) is typing.Union and type(None) in typing.get_args(value) diff --git a/invokeai/app/util/model_exclude_null.py b/invokeai/app/util/model_exclude_null.py index b75f127ec7..6da41039b4 100644 --- a/invokeai/app/util/model_exclude_null.py +++ b/invokeai/app/util/model_exclude_null.py @@ -13,11 +13,11 @@ From https://github.com/tiangolo/fastapi/discussions/8882#discussioncomment-5154 class BaseModelExcludeNull(BaseModel): - def dict(self, *args, **kwargs) -> dict[str, Any]: + def model_dump(self, *args, **kwargs) -> dict[str, Any]: """ Override the default dict method to exclude None values in the response """ kwargs.pop("exclude_none", None) - return super().dict(*args, exclude_none=True, **kwargs) + return super().model_dump(*args, exclude_none=True, **kwargs) pass diff --git a/invokeai/assets/__init__.py b/invokeai/assets/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/invokeai/backend/image_util/txt2mask.py 
b/invokeai/backend/image_util/txt2mask.py index 12db54b0db..de0c6a1652 100644 --- a/invokeai/backend/image_util/txt2mask.py +++ b/invokeai/backend/image_util/txt2mask.py @@ -41,18 +41,18 @@ config = InvokeAIAppConfig.get_config() class SegmentedGrayscale(object): - def __init__(self, image: Image, heatmap: torch.Tensor): + def __init__(self, image: Image.Image, heatmap: torch.Tensor): self.heatmap = heatmap self.image = image - def to_grayscale(self, invert: bool = False) -> Image: + def to_grayscale(self, invert: bool = False) -> Image.Image: return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255))) - def to_mask(self, threshold: float = 0.5) -> Image: + def to_mask(self, threshold: float = 0.5) -> Image.Image: discrete_heatmap = self.heatmap.lt(threshold).int() return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L")) - def to_transparent(self, invert: bool = False) -> Image: + def to_transparent(self, invert: bool = False) -> Image.Image: transparent_image = self.image.copy() # For img2img, we want the selected regions to be transparent, # but to_grayscale() returns the opposite. Thus invert. 
@@ -61,7 +61,7 @@ class SegmentedGrayscale(object): return transparent_image # unscales and uncrops the 352x352 heatmap so that it matches the image again - def _rescale(self, heatmap: Image) -> Image: + def _rescale(self, heatmap: Image.Image) -> Image.Image: size = self.image.width if (self.image.width > self.image.height) else self.image.height resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS) return resized_image.crop((0, 0, self.image.width, self.image.height)) @@ -82,7 +82,7 @@ class Txt2Mask(object): self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir) @torch.no_grad() - def segment(self, image, prompt: str) -> SegmentedGrayscale: + def segment(self, image: Image.Image, prompt: str) -> SegmentedGrayscale: """ Given a prompt string such as "a bagel", tries to identify the object in the provided image and returns a SegmentedGrayscale object in which the brighter @@ -99,7 +99,7 @@ class Txt2Mask(object): heatmap = torch.sigmoid(outputs.logits) return SegmentedGrayscale(image, heatmap) - def _scale_and_crop(self, image: Image) -> Image: + def _scale_and_crop(self, image: Image.Image) -> Image.Image: scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE)) if image.width > image.height: # width is constraint scale = CLIPSEG_SIZE / image.width diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py index bc7fa01e3b..7eceb9be82 100644 --- a/invokeai/backend/image_util/util.py +++ b/invokeai/backend/image_util/util.py @@ -9,7 +9,7 @@ class InitImageResizer: def __init__(self, Image): self.image = Image - def resize(self, width=None, height=None) -> Image: + def resize(self, width=None, height=None) -> Image.Image: """ Return a copy of the image resized to fit within a box width x height. 
The aspect ratio is diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index d4bcea64d0..59cf1260ba 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -793,7 +793,11 @@ def migrate_init_file(legacy_format: Path): old = legacy_parser.parse_args([f"@{str(legacy_format)}"]) new = InvokeAIAppConfig.get_config() - fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"] + fields = [ + x + for x, y in InvokeAIAppConfig.model_fields.items() + if (y.json_schema_extra.get("category", None) if y.json_schema_extra else None) != "DEPRECATED" + ] for attr in fields: if hasattr(old, attr): try: diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index bdc9a6c6bb..38a7361c85 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -236,13 +236,13 @@ import types from dataclasses import dataclass from pathlib import Path from shutil import move, rmtree -from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union +from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union, cast import torch import yaml from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig @@ -294,6 +294,8 @@ class AddModelResult(BaseModel): base_model: BaseModelType = Field(description="The base model") config: ModelConfigBase = Field(description="The configuration of the model") + model_config = ConfigDict(protected_namespaces=()) + MAX_CACHE_SIZE = 6.0 # GB @@ -576,7 +578,7 @@ class ModelManager(object): """ model_key = self.create_key(model_name, 
base_model, model_type) if model_key in self.models: - return self.models[model_key].dict(exclude_defaults=True) + return self.models[model_key].model_dump(exclude_defaults=True) else: return None # TODO: None or empty dict on not found @@ -632,7 +634,7 @@ class ModelManager(object): continue model_dict = dict( - **model_config.dict(exclude_defaults=True), + **model_config.model_dump(exclude_defaults=True), # OpenAPIModelInfoBase model_name=cur_model_name, base_model=cur_base_model, @@ -900,14 +902,16 @@ class ModelManager(object): Write current configuration out to the indicated file. """ data_to_save = dict() - data_to_save["__metadata__"] = self.config_meta.dict() + data_to_save["__metadata__"] = self.config_meta.model_dump() for model_key, model_config in self.models.items(): model_name, base_model, model_type = self.parse_key(model_key) model_class = self._get_implementation(base_model, model_type) if model_class.save_to_config: # TODO: or exclude_unset better fits here? - data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"}) + data_to_save[model_key] = cast(BaseModel, model_config).model_dump( + exclude_defaults=True, exclude={"error"}, mode="json" + ) # alias for config file data_to_save[model_key]["format"] = data_to_save[model_key].pop("model_format") diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py index bf4b208395..0afd731032 100644 --- a/invokeai/backend/model_management/models/__init__.py +++ b/invokeai/backend/model_management/models/__init__.py @@ -2,7 +2,7 @@ import inspect from enum import Enum from typing import Literal, get_origin -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, create_model from .base import ( # noqa: F401 BaseModelType, @@ -106,6 +106,8 @@ class OpenAPIModelInfoBase(BaseModel): base_model: BaseModelType model_type: ModelType + model_config = ConfigDict(protected_namespaces=()) + for base_model, 
models in MODEL_CLASSES.items(): for model_type, model_class in models.items(): @@ -121,17 +123,11 @@ for base_model, models in MODEL_CLASSES.items(): if openapi_cfg_name in vars(): continue - api_wrapper = type( + api_wrapper = create_model( openapi_cfg_name, - (cfg, OpenAPIModelInfoBase), - dict( - __annotations__=dict( - model_type=Literal[model_type.value], - ), - ), + __base__=(cfg, OpenAPIModelInfoBase), + model_type=(Literal[model_type], model_type), # type: ignore ) - - # globals()[openapi_cfg_name] = api_wrapper vars()[openapi_cfg_name] = api_wrapper OPENAPI_MODEL_CONFIGS.append(api_wrapper) diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 6e507735d4..f735e37189 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -19,7 +19,7 @@ from diffusers import logging as diffusers_logging from onnx import numpy_helper from onnxruntime import InferenceSession, SessionOptions, get_available_providers from picklescan.scanner import scan_file_path -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from transformers import logging as transformers_logging @@ -86,14 +86,21 @@ class ModelError(str, Enum): NotFound = "not_found" +def model_config_json_schema_extra(schema: dict[str, Any]) -> None: + if "required" not in schema: + schema["required"] = [] + schema["required"].append("model_type") + + class ModelConfigBase(BaseModel): path: str # or Path description: Optional[str] = Field(None) model_format: Optional[str] = Field(None) error: Optional[ModelError] = Field(None) - class Config: - use_enum_values = True + model_config = ConfigDict( + use_enum_values=True, protected_namespaces=(), json_schema_extra=model_config_json_schema_extra + ) class EmptyConfigLoader(ConfigMixin): diff --git a/invokeai/backend/model_management/models/ip_adapter.py b/invokeai/backend/model_management/models/ip_adapter.py 
index 63694af0c8..c60edd0abe 100644 --- a/invokeai/backend/model_management/models/ip_adapter.py +++ b/invokeai/backend/model_management/models/ip_adapter.py @@ -58,14 +58,16 @@ class IPAdapterModel(ModelBase): def get_model( self, - torch_dtype: Optional[torch.dtype], + torch_dtype: torch.dtype, child_type: Optional[SubModelType] = None, ) -> typing.Union[IPAdapter, IPAdapterPlus]: if child_type is not None: raise ValueError("There are no child models in an IP-Adapter model.") model = build_ip_adapter( - ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), device="cpu", dtype=torch_dtype + ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), + device=torch.device("cpu"), + dtype=torch_dtype, ) self.model_size = model.calc_size() diff --git a/invokeai/backend/model_management/seamless.py b/invokeai/backend/model_management/seamless.py index 7138f2e123..bfdf9e0c53 100644 --- a/invokeai/backend/model_management/seamless.py +++ b/invokeai/backend/model_management/seamless.py @@ -96,7 +96,7 @@ def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axe finally: for module, orig_conv_forward in to_restore: module._conv_forward = orig_conv_forward - if hasattr(m, "asymmetric_padding_mode"): - del m.asymmetric_padding_mode - if hasattr(m, "asymmetric_padding"): - del m.asymmetric_padding + if hasattr(module, "asymmetric_padding_mode"): + del module.asymmetric_padding_mode + if hasattr(module, "asymmetric_padding"): + del module.asymmetric_padding diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py index abef979b1c..b5ea40185a 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py @@ -1,7 +1,8 @@ import math +from typing import Optional -import PIL import torch +from PIL import Image from 
torchvision.transforms.functional import InterpolationMode from torchvision.transforms.functional import resize as tv_resize @@ -11,7 +12,7 @@ class AttentionMapSaver: self.token_ids = token_ids self.latents_shape = latents_shape # self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) - self.collated_maps = {} + self.collated_maps: dict[str, torch.Tensor] = {} def clear_maps(self): self.collated_maps = {} @@ -38,9 +39,10 @@ class AttentionMapSaver: def write_maps_to_disk(self, path: str): pil_image = self.get_stacked_maps_image() - pil_image.save(path, "PNG") + if pil_image is not None: + pil_image.save(path, "PNG") - def get_stacked_maps_image(self) -> PIL.Image: + def get_stacked_maps_image(self) -> Optional[Image.Image]: """ Scale all collected attention maps to the same size, blend them together and return as an image. :return: An image containing a vertical stack of blended attention maps, one for each requested token. @@ -95,4 +97,4 @@ class AttentionMapSaver: return None merged_bytes = merged.mul(0xFF).byte() - return PIL.Image.fromarray(merged_bytes.numpy(), mode="L") + return Image.fromarray(merged_bytes.numpy(), mode="L") diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts index 79a09c628f..bd5422841f 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts @@ -151,7 +151,9 @@ export const addRequestedSingleImageDeletionListener = () => { if (wasImageDeleted) { dispatch( - api.util.invalidateTags([{ type: 'Board', id: imageDTO.board_id }]) + api.util.invalidateTags([ + { type: 'Board', id: imageDTO.board_id ?? 
'none' }, + ]) ); } }, diff --git a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx index 7c85b3557e..5ea17f788c 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineMultiSelect.tsx @@ -6,7 +6,7 @@ import { useMantineMultiSelectStyles } from 'mantine-theme/hooks/useMantineMulti import { KeyboardEvent, RefObject, memo, useCallback } from 'react'; type IAIMultiSelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx index 39fe7ead3c..675314b421 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSearchableSelect.tsx @@ -12,7 +12,7 @@ export type IAISelectDataType = { }; type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; label?: string; inputRef?: RefObject; }; diff --git a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx index 8cc08d2304..9541015b65 100644 --- a/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx +++ b/invokeai/frontend/web/src/common/components/IAIMantineSelect.tsx @@ -10,7 +10,7 @@ export type IAISelectDataType = { }; export type IAISelectProps = Omit & { - tooltip?: string; + tooltip?: string | null; inputRef?: RefObject; label?: string; }; diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts index 32e24845ea..4c2cd31eca 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts +++ 
b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts @@ -39,7 +39,10 @@ export const dynamicPromptsSlice = createSlice({ promptsChanged: (state, action: PayloadAction) => { state.prompts = action.payload; }, - parsingErrorChanged: (state, action: PayloadAction) => { + parsingErrorChanged: ( + state, + action: PayloadAction + ) => { state.parsingError = action.payload; }, isErrorChanged: (state, action: PayloadAction) => { diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index f7ef848211..87c716bb81 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -10,7 +10,7 @@ import { } from 'features/parameters/types/parameterSchemas'; import i18n from 'i18next'; import { has, keyBy } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { OpenAPIV3_1 } from 'openapi-types'; import { RgbaColor } from 'react-colorful'; import { Node } from 'reactflow'; import { Graph, _InputField, _OutputField } from 'services/api/types'; @@ -791,9 +791,9 @@ export type IntegerInputFieldTemplate = InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type IntegerCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -814,9 +814,9 @@ export type FloatInputFieldTemplate = InputFieldTemplateBase & { default: number; multipleOf?: number; maximum?: number; - exclusiveMaximum?: boolean; + exclusiveMaximum?: number; minimum?: number; - exclusiveMinimum?: boolean; + exclusiveMinimum?: number; }; export type FloatCollectionInputFieldTemplate = InputFieldTemplateBase & { @@ -1163,20 +1163,20 @@ export type TypeHints = { }; export type InvocationSchemaExtra = { - output: OpenAPIV3.ReferenceObject; // the output of the 
invocation + output: OpenAPIV3_1.ReferenceObject; // the output of the invocation title: string; category?: string; tags?: string[]; version?: string; properties: Omit< - NonNullable & + NonNullable & (_InputField | _OutputField), 'type' > & { - type: Omit & { + type: Omit & { default: AnyInvocationType; }; - use_cache: Omit & { + use_cache: Omit & { default: boolean; }; }; @@ -1187,17 +1187,17 @@ export type InvocationSchemaType = { }; export type InvocationBaseSchemaObject = Omit< - OpenAPIV3.BaseSchemaObject, + OpenAPIV3_1.BaseSchemaObject, 'title' | 'type' | 'properties' > & InvocationSchemaExtra; export type InvocationOutputSchemaObject = Omit< - OpenAPIV3.SchemaObject, + OpenAPIV3_1.SchemaObject, 'properties' > & { - properties: OpenAPIV3.SchemaObject['properties'] & { - type: Omit & { + properties: OpenAPIV3_1.SchemaObject['properties'] & { + type: Omit & { default: string; }; } & { @@ -1205,14 +1205,18 @@ export type InvocationOutputSchemaObject = Omit< }; }; -export type InvocationFieldSchema = OpenAPIV3.SchemaObject & _InputField; +export type InvocationFieldSchema = OpenAPIV3_1.SchemaObject & _InputField; + +export type OpenAPIV3_1SchemaOrRef = + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject; export interface ArraySchemaObject extends InvocationBaseSchemaObject { - type: OpenAPIV3.ArraySchemaObjectType; - items: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject; + type: OpenAPIV3_1.ArraySchemaObjectType; + items: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject; } export interface NonArraySchemaObject extends InvocationBaseSchemaObject { - type?: OpenAPIV3.NonArraySchemaObjectType; + type?: OpenAPIV3_1.NonArraySchemaObjectType; } export type InvocationSchemaObject = ( @@ -1221,41 +1225,41 @@ export type InvocationSchemaObject = ( ) & { class: 'invocation' }; export const isSchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.SchemaObject => Boolean(obj && !('$ref' in obj)); + obj: 
OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.SchemaObject => Boolean(obj && !('$ref' in obj)); export const isArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type === 'array'); export const isNonArraySchemaObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.NonArraySchemaObject => + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.NonArraySchemaObject => Boolean(obj && !('$ref' in obj) && obj.type !== 'array'); export const isRefObject = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject | undefined -): obj is OpenAPIV3.ReferenceObject => Boolean(obj && '$ref' in obj); + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject | undefined +): obj is OpenAPIV3_1.ReferenceObject => Boolean(obj && '$ref' in obj); export const isInvocationSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationSchemaObject ): obj is InvocationSchemaObject => 'class' in obj && obj.class === 'invocation'; export const isInvocationOutputSchemaObject = ( obj: - | OpenAPIV3.ReferenceObject - | OpenAPIV3.SchemaObject + | OpenAPIV3_1.ReferenceObject + | OpenAPIV3_1.SchemaObject | InvocationOutputSchemaObject ): obj is InvocationOutputSchemaObject => 'class' in obj && obj.class === 'output'; export const isInvocationFieldSchema = ( - obj: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject + obj: OpenAPIV3_1.ReferenceObject | OpenAPIV3_1.SchemaObject ): obj is InvocationFieldSchema => !('$ref' in obj); export type InvocationEdgeExtra = { type: 'default' | 'collapsed' }; diff --git 
a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index 1f7fe81620..3fd44207c0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -1,5 +1,12 @@ -import { isBoolean, isInteger, isNumber, isString } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { + isArray, + isBoolean, + isInteger, + isNumber, + isString, + startCase, +} from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { COLLECTION_MAP, POLYMORPHIC_TYPES, @@ -72,6 +79,7 @@ import { T2IAdapterCollectionInputFieldTemplate, BoardInputFieldTemplate, InputFieldTemplate, + OpenAPIV3_1SchemaOrRef, } from '../types/types'; import { ControlField } from 'services/api/types'; @@ -90,7 +98,7 @@ export type BuildInputFieldArg = { * @example * refObjectToFieldType({ "$ref": "#/components/schemas/ImageField" }) --> 'ImageField' */ -export const refObjectToSchemaName = (refObject: OpenAPIV3.ReferenceObject) => +export const refObjectToSchemaName = (refObject: OpenAPIV3_1.ReferenceObject) => refObject.$ref.split('/').slice(-1)[0]; const buildIntegerInputFieldTemplate = ({ @@ -111,7 +119,10 @@ const buildIntegerInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -119,7 +130,10 @@ const buildIntegerInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -144,7 +158,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ 
template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -152,7 +169,10 @@ const buildIntegerPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -195,7 +215,10 @@ const buildFloatInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -203,7 +226,10 @@ const buildFloatInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } @@ -227,7 +253,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.maximum = schemaObject.maximum; } - if (schemaObject.exclusiveMaximum !== undefined) { + if ( + schemaObject.exclusiveMaximum !== undefined && + isNumber(schemaObject.exclusiveMaximum) + ) { template.exclusiveMaximum = schemaObject.exclusiveMaximum; } @@ -235,7 +264,10 @@ const buildFloatPolymorphicInputFieldTemplate = ({ template.minimum = schemaObject.minimum; } - if (schemaObject.exclusiveMinimum !== undefined) { + if ( + schemaObject.exclusiveMinimum !== undefined && + isNumber(schemaObject.exclusiveMinimum) + ) { template.exclusiveMinimum = schemaObject.exclusiveMinimum; } return template; @@ -872,84 +904,106 @@ const buildSchedulerInputFieldTemplate = ({ }; export 
const getFieldType = ( - schemaObject: InvocationFieldSchema + schemaObject: OpenAPIV3_1SchemaOrRef ): string | undefined => { - if (schemaObject?.ui_type) { - return schemaObject.ui_type; - } else if (!schemaObject.type) { - // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf + if (isSchemaObject(schemaObject)) { + if (!schemaObject.type) { + // if schemaObject has no type, then it should have one of allOf, anyOf, oneOf - if (schemaObject.allOf) { - const allOf = schemaObject.allOf; - if (allOf && allOf[0] && isRefObject(allOf[0])) { - return refObjectToSchemaName(allOf[0]); - } - } else if (schemaObject.anyOf) { - const anyOf = schemaObject.anyOf; - /** - * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: - * - an `anyOf` with two items - * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` - * - the other is a `SchemaObject` or `ReferenceObject` of type T - * - * Any other cases we ignore. 
- */ - - let firstType: string | undefined; - let secondType: string | undefined; - - if (isArraySchemaObject(anyOf[0])) { - // first is array, second is not - const first = anyOf[0].items; - const second = anyOf[1]; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + if (schemaObject.allOf) { + const allOf = schemaObject.allOf; + if (allOf && allOf[0] && isRefObject(allOf[0])) { + return refObjectToSchemaName(allOf[0]); } - } else if (isArraySchemaObject(anyOf[1])) { - // first is not array, second is - const first = anyOf[0]; - const second = anyOf[1].items; - if (isRefObject(first) && isRefObject(second)) { - firstType = refObjectToSchemaName(first); - secondType = refObjectToSchemaName(second); - } else if ( - isNonArraySchemaObject(first) && - isNonArraySchemaObject(second) - ) { - firstType = first.type; - secondType = second.type; + } else if (schemaObject.anyOf) { + // ignore null types + const anyOf = schemaObject.anyOf.filter((i) => { + if (isSchemaObject(i)) { + if (i.type === 'null') { + return false; + } + } + return true; + }); + if (anyOf.length === 1) { + if (isRefObject(anyOf[0])) { + return refObjectToSchemaName(anyOf[0]); + } else if (isSchemaObject(anyOf[0])) { + return getFieldType(anyOf[0]); + } + } + /** + * Handle Polymorphic inputs, eg string | string[]. In OpenAPI, this is: + * - an `anyOf` with two items + * - one is an `ArraySchemaObject` with a single `SchemaObject or ReferenceObject` of type T in its `items` + * - the other is a `SchemaObject` or `ReferenceObject` of type T + * + * Any other cases we ignore. 
+ */ + + let firstType: string | undefined; + let secondType: string | undefined; + + if (isArraySchemaObject(anyOf[0])) { + // first is array, second is not + const first = anyOf[0].items; + const second = anyOf[1]; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } else if (isArraySchemaObject(anyOf[1])) { + // first is not array, second is + const first = anyOf[0]; + const second = anyOf[1].items; + if (isRefObject(first) && isRefObject(second)) { + firstType = refObjectToSchemaName(first); + secondType = refObjectToSchemaName(second); + } else if ( + isNonArraySchemaObject(first) && + isNonArraySchemaObject(second) + ) { + firstType = first.type; + secondType = second.type; + } + } + if (firstType === secondType && isPolymorphicItemType(firstType)) { + return SINGLE_TO_POLYMORPHIC_MAP[firstType]; } } - if (firstType === secondType && isPolymorphicItemType(firstType)) { - return SINGLE_TO_POLYMORPHIC_MAP[firstType]; + } else if (schemaObject.enum) { + return 'enum'; + } else if (schemaObject.type) { + if (schemaObject.type === 'number') { + // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them + return 'float'; + } else if (schemaObject.type === 'array') { + const itemType = isSchemaObject(schemaObject.items) + ? 
schemaObject.items.type + : refObjectToSchemaName(schemaObject.items); + + if (isArray(itemType)) { + // This is a nested array, which we don't support + return; + } + + if (isCollectionItemType(itemType)) { + return COLLECTION_MAP[itemType]; + } + + return; + } else if (!isArray(schemaObject.type)) { + return schemaObject.type; } } - } else if (schemaObject.enum) { - return 'enum'; - } else if (schemaObject.type) { - if (schemaObject.type === 'number') { - // floats are "number" in OpenAPI, while ints are "integer" - we need to distinguish them - return 'float'; - } else if (schemaObject.type === 'array') { - const itemType = isSchemaObject(schemaObject.items) - ? schemaObject.items.type - : refObjectToSchemaName(schemaObject.items); - - if (isCollectionItemType(itemType)) { - return COLLECTION_MAP[itemType]; - } - - return; - } else { - return schemaObject.type; - } + } else if (isRefObject(schemaObject)) { + return refObjectToSchemaName(schemaObject); } return; }; @@ -1025,7 +1079,15 @@ export const buildInputFieldTemplate = ( name: string, fieldType: FieldType ) => { - const { input, ui_hidden, ui_component, ui_type, ui_order } = fieldSchema; + const { + input, + ui_hidden, + ui_component, + ui_type, + ui_order, + ui_choice_labels, + item_default, + } = fieldSchema; const extra = { // TODO: Can we support polymorphic inputs in the UI? @@ -1035,11 +1097,13 @@ export const buildInputFieldTemplate = ( ui_type, required: nodeSchema.required?.includes(name) ?? false, ui_order, + ui_choice_labels, + item_default, }; const baseField = { name, - title: fieldSchema.title ?? '', + title: fieldSchema.title ?? (name ? startCase(name) : ''), description: fieldSchema.description ?? 
'', fieldKind: 'input' as const, ...extra, diff --git a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts index 69d8d9dd4c..93cd75dd75 100644 --- a/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts +++ b/invokeai/frontend/web/src/features/nodes/util/parseSchema.ts @@ -1,7 +1,7 @@ import { logger } from 'app/logging/logger'; import { parseify } from 'common/util/serialize'; -import { reduce } from 'lodash-es'; -import { OpenAPIV3 } from 'openapi-types'; +import { reduce, startCase } from 'lodash-es'; +import { OpenAPIV3_1 } from 'openapi-types'; import { AnyInvocationType } from 'services/events/types'; import { FieldType, @@ -60,7 +60,7 @@ const isNotInDenylist = (schema: InvocationSchemaObject) => !invocationDenylist.includes(schema.properties.type.default); export const parseSchema = ( - openAPI: OpenAPIV3.Document, + openAPI: OpenAPIV3_1.Document, nodesAllowlistExtra: string[] | undefined = undefined, nodesDenylistExtra: string[] | undefined = undefined ): Record => { @@ -110,7 +110,7 @@ export const parseSchema = ( return inputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -209,7 +209,7 @@ export const parseSchema = ( return outputsAccumulator; } - const fieldType = getFieldType(property); + const fieldType = property.ui_type ?? getFieldType(property); if (!isFieldType(fieldType)) { logger('nodes').warn( @@ -222,7 +222,8 @@ export const parseSchema = ( outputsAccumulator[propertyName] = { fieldKind: 'output', name: propertyName, - title: property.title ?? '', + title: + property.title ?? (propertyName ? startCase(propertyName) : ''), description: property.description ?? '', type: fieldType, ui_hidden: property.ui_hidden ?? 
false, diff --git a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx index 9cc991335e..d441be4ecb 100644 --- a/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx +++ b/invokeai/frontend/web/src/features/queue/components/common/QueueItemCard.tsx @@ -7,7 +7,7 @@ const QueueItemCard = ({ session_queue_item, label, }: { - session_queue_item?: components['schemas']['SessionQueueItem']; + session_queue_item?: components['schemas']['SessionQueueItem'] | null; label: string; }) => { return ( diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx index 6837a2e853..e5c68ba6cf 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/ModelManager/subpanels/MergeModelsPanel.tsx @@ -112,7 +112,7 @@ export default function MergeModelsPanel() { } }); - const mergeModelsInfo: MergeModelConfig = { + const mergeModelsInfo: MergeModelConfig['body'] = { model_names: models_names, merged_model_name: mergedModelName !== '' ? 
mergedModelName : models_names.join('-'), @@ -125,7 +125,7 @@ export default function MergeModelsPanel() { mergeModels({ base_model: baseModel, - body: mergeModelsInfo, + body: { body: mergeModelsInfo }, }) .unwrap() .then((_) => { diff --git a/invokeai/frontend/web/src/services/api/endpoints/images.ts b/invokeai/frontend/web/src/services/api/endpoints/images.ts index 3fa606d4b6..99a5fc5f50 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/images.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/images.ts @@ -520,7 +520,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? undefined; return [ { @@ -637,7 +637,7 @@ export const imagesApi = api.injectEndpoints({ // assume all images are on the same board/category if (images[0]) { const categories = getCategories(images[0]); - const boardId = images[0].board_id; + const boardId = images[0].board_id ?? 
undefined; return [ { type: 'ImageList', diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index e476217e6c..d4678dc03b 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,80 +5,6 @@ export type paths = { - "/api/v1/sessions/": { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - get: operations["list_sessions"]; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - post: operations["create_session"]; - }; - "/api/v1/sessions/{session_id}": { - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get: operations["get_session"]; - }; - "/api/v1/sessions/{session_id}/nodes": { - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - post: operations["add_node"]; - }; - "/api/v1/sessions/{session_id}/nodes/{node_path}": { - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - put: operations["update_node"]; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete: operations["delete_node"]; - }; - "/api/v1/sessions/{session_id}/edges": { - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - post: operations["add_edge"]; - }; - "/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}": { - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete: operations["delete_edge"]; - }; - "/api/v1/sessions/{session_id}/invoke": { - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - put: operations["invoke_session"]; - /** - * Cancel Session Invoke - * @deprecated - * @description 
Invokes a session - */ - delete: operations["cancel_session_invoke"]; - }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -481,18 +407,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -506,9 +432,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default add - * @enum {string} + * @constant */ type: "add"; }; @@ -551,7 +477,6 @@ export type components = { }; /** * BaseModelType - * @description An enumeration. * @enum {string} */ BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner"; @@ -566,11 +491,8 @@ export type components = { * Data * @description The batch data collection. */ - data?: components["schemas"]["BatchDatum"][][]; - /** - * Graph - * @description The graph to initialize the session with - */ + data?: components["schemas"]["BatchDatum"][][] | null; + /** @description The graph to initialize the session with */ graph: components["schemas"]["Graph"]; /** * Runs @@ -655,18 +577,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Width * @description The width of the image @@ -687,20 +609,19 @@ export type components = { */ mode?: "RGB" | "RGBA"; /** - * Color * @description The color of the image * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default blank_image - * @enum {string} + * @constant */ type: "blank_image"; }; @@ -719,27 +640,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents A - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents_a?: components["schemas"]["LatentsField"]; - /** - * Latents B - * @description Latents tensor - */ + /** @description Latents tensor */ latents_b?: components["schemas"]["LatentsField"]; /** * Alpha @@ -748,9 +663,9 @@ export type components = { */ alpha?: number; /** - * Type + * type * @default lblend - * @enum {string} + * @constant */ type: "lblend"; }; @@ -760,12 +675,12 @@ export type components = { * Board Name * @description The board's new name. */ - board_name?: string; + board_name?: string | null; /** * Cover Image Name * @description The name of the board's new cover image. 
*/ - cover_image_name?: string; + cover_image_name?: string | null; }; /** * BoardDTO @@ -796,12 +711,12 @@ export type components = { * Deleted At * @description The deleted timestamp of the board. */ - deleted_at?: string; + deleted_at?: string | null; /** * Cover Image Name * @description The name of the board's cover image. */ - cover_image_name?: string; + cover_image_name: string | null; /** * Image Count * @description The number of images in the board. @@ -872,14 +787,11 @@ export type components = { * Board Id * @description The board from which image should be downloaded from */ - board_id?: string; + board_id?: string | null; }; /** Body_enqueue_batch */ Body_enqueue_batch: { - /** - * Batch - * @description Batch to process - */ + /** @description Batch to process */ batch: components["schemas"]["Batch"]; /** * Prepend @@ -890,10 +802,7 @@ export type components = { }; /** Body_enqueue_graph */ Body_enqueue_graph: { - /** - * Graph - * @description The graph to enqueue - */ + /** @description The graph to enqueue */ graph: components["schemas"]["Graph"]; /** * Prepend @@ -912,41 +821,13 @@ export type components = { /** * Prediction Type * @description Prediction type for SDv2 checkpoints and rare SDv1 checkpoints - * @enum {string} */ - prediction_type?: "v_prediction" | "epsilon" | "sample"; + prediction_type?: ("v_prediction" | "epsilon" | "sample") | null; }; /** Body_merge_models */ Body_merge_models: { - /** - * Model Names - * @description model name - */ - model_names: string[]; - /** - * Merged Model Name - * @description Name of destination model - */ - merged_model_name: string; - /** - * Alpha - * @description Alpha weighting strength to apply to 2d and 3d models - * @default 0.5 - */ - alpha?: number; - /** @description Interpolation method */ - interp: components["schemas"]["MergeInterpolationMethod"]; - /** - * Force - * @description Force merging of models created with different versions of diffusers - * @default false - */ - force?: 
boolean; - /** - * Merge Dest Directory - * @description Save the merged model to the designated directory (with 'merged_model_name' appended) - */ - merge_dest_directory?: string; + /** @description Model configuration */ + body: components["schemas"]["MergeModelsBody"]; }; /** Body_parse_dynamicprompts */ Body_parse_dynamicprompts: { @@ -1023,27 +904,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of boolean values */ collection?: boolean[]; /** - * Type + * type * @default boolean_collection - * @enum {string} + * @constant */ type: "boolean_collection"; }; @@ -1058,9 +939,9 @@ export type components = { */ collection: boolean[]; /** - * Type + * type * @default boolean_collection_output - * @enum {string} + * @constant */ type: "boolean_collection_output"; }; @@ -1079,18 +960,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The boolean value @@ -1098,9 +979,9 @@ export type components = { */ value?: boolean; /** - * Type + * type * @default boolean - * @enum {string} + * @constant */ type: "boolean"; }; @@ -1115,9 +996,9 @@ export type components = { */ value: boolean; /** - * Type + * type * @default boolean_output - * @enum {string} + * @constant */ type: "boolean_output"; }; @@ -1128,19 +1009,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default clip_vision + * @constant */ model_type: "clip_vision"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** CLIPVisionModelField */ CLIPVisionModelField: { @@ -1167,27 +1049,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_cv2 - * @enum {string} + * @constant */ type: "infill_cv2"; }; @@ -1217,29 +1096,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default canny_image_processor - * @enum {string} - */ - type: "canny_image_processor"; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -1252,6 +1122,12 @@ export type components = { * @default 200 */ high_threshold?: number; + /** + * type + * @default canny_image_processor + * @constant + */ + type: "canny_image_processor"; }; /** * ClearResult @@ -1266,15 +1142,9 @@ export type components = { }; /** ClipField */ ClipField: { - /** - * Tokenizer - * @description Info to load tokenizer submodel - */ + /** @description Info to load tokenizer submodel */ tokenizer: components["schemas"]["ModelInfo"]; - /** - * Text Encoder - * @description Info to load text_encoder submodel - */ + /** @description Info to load text_encoder submodel */ 
text_encoder: components["schemas"]["ModelInfo"]; /** * Skipped Layers @@ -1302,18 +1172,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count @@ -1326,9 +1196,9 @@ export type components = { */ skipped_layers?: number; /** - * Type + * type * @default clip_skip - * @enum {string} + * @constant */ type: "clip_skip"; }; @@ -1341,11 +1211,11 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default clip_skip_output - * @enum {string} + * @constant */ type: "clip_skip_output"; }; @@ -1364,18 +1234,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection Item * @description The item to collect (all inputs must be of the same type) @@ -1387,18 +1257,13 @@ export type components = { */ collection?: unknown[]; /** - * Type + * type * @default collect - * @enum {string} + * @constant */ type: "collect"; }; - /** - * CollectInvocationOutput - * @description Base class for all invocation outputs. 
- * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. - */ + /** CollectInvocationOutput */ CollectInvocationOutput: { /** * Collection @@ -1406,9 +1271,9 @@ export type components = { */ collection: unknown[]; /** - * Type + * type * @default collect_output - * @enum {string} + * @constant */ type: "collect_output"; }; @@ -1423,9 +1288,9 @@ export type components = { */ collection: components["schemas"]["ColorField"][]; /** - * Type + * type * @default color_collection_output - * @enum {string} + * @constant */ type: "color_collection_output"; }; @@ -1445,33 +1310,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to color-correct - */ + use_cache?: boolean | null; + /** @description The image to color-correct */ image?: components["schemas"]["ImageField"]; - /** - * Reference - * @description Reference image for color-correction - */ + /** @description Reference image for color-correction */ reference?: components["schemas"]["ImageField"]; - /** - * Mask - * @description Mask to use when applying color-correction - */ - mask?: components["schemas"]["ImageField"]; + /** @description Mask to use when applying color-correction */ + mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur Radius * @description Mask blur radius @@ -1479,9 +1335,9 @@ export type components = { */ mask_blur_radius?: number; /** - * Type + * type * @default color_correct - * @enum {string} + * @constant */ type: "color_correct"; }; @@ -1526,33 +1382,32 @@ export type components = { * @description Whether or not this is an intermediate 
invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** - * Color * @description The color value * @default { - * "r": 0, - * "g": 0, + * "a": 255, * "b": 0, - * "a": 255 + * "g": 0, + * "r": 0 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default color - * @enum {string} + * @constant */ type: "color"; }; @@ -1571,50 +1426,44 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default color_map_image_processor - * @enum {string} - */ - type: "color_map_image_processor"; /** * Color Map Tile Size * @description Tile size * @default 64 */ color_map_tile_size?: number; + /** + * type + * @default color_map_image_processor + * @constant + */ + type: "color_map_image_processor"; }; /** * ColorOutput * @description Base class for nodes that output a single color */ ColorOutput: { - /** - * Color - * @description The output color - */ + /** @description The output color */ color: components["schemas"]["ColorField"]; /** - * Type + * type * @default color_output - * @enum {string} + * @constant */ type: "color_output"; }; @@ -1633,35 +1482,35 @@ export type components = { * @description Whether or not this is 
an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor * @default */ prompt?: string; - /** - * Type - * @default compel - * @enum {string} - */ - type: "compel"; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; + /** + * type + * @default compel + * @constant + */ + type: "compel"; }; /** * Conditioning Collection Primitive @@ -1678,27 +1527,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of conditioning tensors */ collection?: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection - * @enum {string} + * @constant */ type: "conditioning_collection"; }; @@ -1713,9 +1562,9 @@ export type components = { */ collection: components["schemas"]["ConditioningField"][]; /** - * Type + * type * @default conditioning_collection_output - * @enum {string} + * @constant */ type: "conditioning_collection_output"; }; @@ -1745,27 +1594,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Conditioning - * @description Conditioning tensor - */ + use_cache?: boolean | null; + /** @description Conditioning tensor */ conditioning?: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning - * @enum {string} + * @constant */ type: "conditioning"; }; @@ -1774,15 +1620,12 @@ export type components = { * @description Base class for nodes that output a single conditioning tensor */ ConditioningOutput: { - /** - * Conditioning - * @description Conditioning tensor - */ + /** @description Conditioning tensor */ conditioning: components["schemas"]["ConditioningField"]; /** - * Type + * type * @default conditioning_output - * @enum {string} + * @constant */ type: "conditioning_output"; }; @@ -1801,29 +1644,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default content_shuffle_image_processor - * @enum {string} - */ - type: "content_shuffle_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -1854,18 +1688,18 @@ export type components = { * @default 256 */ f?: number; + /** + * type + * @default content_shuffle_image_processor + * @constant + */ + type: "content_shuffle_image_processor"; }; /** ControlField */ ControlField: { - /** - * Image - * @description The control image - */ + /** @description The control image */ image: components["schemas"]["ImageField"]; - /** - * Control Model - * @description The ControlNet model to use - */ + /** @description The ControlNet model to use */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1915,27 +1749,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The control image - */ + use_cache?: boolean | null; + /** @description The control image */ image?: components["schemas"]["ImageField"]; - /** - * Control Model - * @description ControlNet model to load - */ + /** @description ControlNet model to load */ control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight @@ -1970,9 +1798,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default controlnet - * @enum {string} + * @constant */ type: "controlnet"; }; @@ -1983,19 +1811,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Config */ config: string; }; @@ -2006,19 +1835,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default controlnet + * @constant */ model_type: "controlnet"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * ControlNetModelField @@ -2038,15 +1868,12 @@ export type 
components = { * @description node output for ControlNet info */ ControlOutput: { - /** - * Control - * @description ControlNet(s) to apply - */ + /** @description ControlNet(s) to apply */ control: components["schemas"]["ControlField"]; /** - * Type + * type * @default control_output - * @enum {string} + * @constant */ type: "control_output"; }; @@ -2065,147 +1892,138 @@ export type components = { * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Created By * @description The name of the creator of the image */ - created_by?: string; + created_by: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main 
model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; + init_image?: string | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * 
@description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; }; /** * Create Denoise Mask @@ -2222,32 +2040,23 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Vae - * @description VAE - */ + use_cache?: boolean | null; + /** @description VAE */ vae?: components["schemas"]["VaeField"]; - /** - * Image - * @description Image which will be masked - */ - image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ + /** @description Image which will be masked */ + image?: components["schemas"]["ImageField"] | null; + /** @description The mask to use when pasting */ mask?: components["schemas"]["ImageField"]; /** * Tiled @@ -2258,21 +2067,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default create_denoise_mask - * @enum {string} + * @constant */ type: "create_denoise_mask"; }; - /** - * CursorPaginatedResults[SessionQueueItemDTO] - * @description Cursor-paginated results - * Generic must be a Pydantic model - */ + /** CursorPaginatedResults[SessionQueueItemDTO] */ CursorPaginatedResults_SessionQueueItemDTO_: { /** * Limit @@ -2305,32 +2110,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to inpaint - */ + use_cache?: boolean | null; + /** @description The image to inpaint */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when inpainting - */ + /** @description The mask to use when inpainting */ mask?: components["schemas"]["ImageField"]; /** - * Type + * type * @default cv_inpaint - * @enum {string} + * @constant */ type: "cv_inpaint"; }; @@ -2372,23 +2171,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ + positive_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Negative conditioning tensor */ + negative_conditioning?: components["schemas"]["ConditioningField"]; + /** @description Noise tensor */ + noise?: components["schemas"]["LatentsField"] | null; /** * Steps * @description Number of steps to run @@ -2420,49 +2220,33 @@ export type components = { * @enum {string} */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | 
"dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** Control */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; - /** - * IP-Adapter - * @description IP-Adapter to apply - */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][]; - /** - * T2I-Adapter - * @description T2I-Adapter(s) to apply - */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][]; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Denoise Mask - * @description The mask to use for the operation - */ - denoise_mask?: components["schemas"]["DenoiseMaskField"]; - /** - * Type - * @default denoise_latents - * @enum {string} - */ - type: "denoise_latents"; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; /** * UNet * @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; + /** Control */ + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + /** + * IP-Adapter + * @description IP-Adapter to apply + */ + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + /** + * T2I-Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"] | null; + /** @description The mask to use for the operation */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * type + * @default denoise_latents + * @constant + */ + type: "denoise_latents"; 
}; /** * DenoiseMaskField @@ -2478,22 +2262,19 @@ export type components = { * Masked Latents Name * @description The name of the masked image latents */ - masked_latents_name?: string; + masked_latents_name: string | null; }; /** * DenoiseMaskOutput * @description Base class for nodes that output a single image */ DenoiseMaskOutput: { - /** - * Denoise Mask - * @description Mask for denoise model run - */ + /** @description Mask for denoise model run */ denoise_mask: components["schemas"]["DenoiseMaskField"]; /** - * Type + * type * @default denoise_mask_output - * @enum {string} + * @constant */ type: "denoise_mask_output"; }; @@ -2512,18 +2293,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -2537,9 +2318,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default div - * @enum {string} + * @constant */ type: "div"; }; @@ -2558,18 +2339,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description The prompt to parse with dynamicprompts @@ -2588,9 +2369,9 @@ export type components = { */ combinatorial?: boolean; /** - * Type + * type * @default dynamic_prompt - * @enum {string} + * @constant */ type: "dynamic_prompt"; }; @@ -2599,7 +2380,7 @@ export type components = { /** Prompts */ prompts: string[]; /** Error */ - error?: string; + error?: string | null; }; /** * Upscale (RealESRGAN) @@ -2616,22 +2397,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The input image - */ + use_cache?: boolean | null; + /** @description The input image */ image?: components["schemas"]["ImageField"]; /** * Model Name @@ -2647,23 +2425,17 @@ export type components = { */ tile_size?: number; /** - * Type + * type * @default esrgan - * @enum {string} + * @constant */ type: "esrgan"; }; /** Edge */ Edge: { - /** - * Source - * @description The connection for the edge's from node and field - */ + /** @description The connection for the edge's from node and field */ source: components["schemas"]["EdgeConnection"]; - /** - * Destination - * @description The connection for the edge's to node and field - */ + /** @description The connection for the edge's to node and field */ destination: components["schemas"]["EdgeConnection"]; }; /** 
EdgeConnection */ @@ -2696,10 +2468,7 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority @@ -2719,20 +2488,14 @@ export type components = { * @description The total number of queue items requested to be enqueued */ requested: number; - /** - * Batch - * @description The batch that was enqueued - */ + /** @description The batch that was enqueued */ batch: components["schemas"]["Batch"]; /** * Priority * @description The priority of the enqueued batch */ priority: number; - /** - * Queue Item - * @description The queue item that was enqueued - */ + /** @description The queue item that was enqueued */ queue_item: components["schemas"]["SessionQueueItemDTO"]; }; /** @@ -2750,22 +2513,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Minimum Confidence @@ -2780,9 +2540,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_identifier - * @enum {string} + * @constant */ type: "face_identifier"; }; @@ -2801,22 +2561,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image to face detect - */ + use_cache?: boolean | null; + /** @description Image to face detect */ image?: components["schemas"]["ImageField"]; /** * Face Ids @@ -2855,9 +2612,9 @@ export type components = { */ invert_mask?: boolean; /** - * Type + * type * @default face_mask_detection - * @enum {string} + * @constant */ type: "face_mask_detection"; }; @@ -2866,10 +2623,7 @@ export type components = { * @description Base class for FaceMask output */ FaceMaskOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2882,15 +2636,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_mask_output - * @enum {string} + * @constant */ type: "face_mask_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; }; /** @@ -2908,22 +2659,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description Image for face detection - */ + use_cache?: boolean | null; + /** @description Image for face detection */ image?: components["schemas"]["ImageField"]; /** * Face Id @@ -2962,9 +2710,9 @@ export type components = { */ chunk?: boolean; /** - * Type + * type * @default face_off - * @enum {string} + * @constant */ type: "face_off"; }; @@ -2973,10 +2721,7 @@ export type components = { * @description Base class for FaceOff Output */ FaceOffOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -2989,15 +2734,12 @@ export type components = { */ height: number; /** - * Type + * type * @default face_off_output - * @enum {string} + * @constant */ type: "face_off_output"; - /** - * Mask - * @description The output mask - */ + /** @description The output mask */ mask: components["schemas"]["ImageField"]; /** * X @@ -3025,27 +2767,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of float values */ collection?: number[]; /** - * Type + * type * @default float_collection - * @enum {string} + * @constant */ type: "float_collection"; }; @@ -3060,9 +2802,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default float_collection_output - * @enum {string} + * @constant */ type: "float_collection_output"; }; @@ -3081,18 +2823,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -3100,9 +2842,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default float - * @enum {string} + * @constant */ type: "float"; }; @@ -3121,18 +2863,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The first value of the range @@ -3152,9 +2894,9 @@ export type components = { */ steps?: number; /** - * Type + * type * @default float_range - * @enum {string} + * @constant */ type: "float_range"; }; @@ -3173,18 +2915,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -3205,9 +2947,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default float_math - * @enum {string} + * @constant */ type: "float_math"; }; @@ -3222,9 +2964,9 @@ export type components = { */ value: number; /** - * Type + * type * @default float_output - * @enum {string} + * @constant */ type: "float_output"; }; @@ -3243,18 +2985,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The value to round @@ -3275,9 +3017,9 @@ export type components = { */ method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; /** - * Type + * type * @default float_to_int - * @enum {string} + * @constant */ type: "float_to_int"; }; @@ -3293,7 +3035,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | 
components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | 
components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | 
components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; + [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | 
components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; }; /** * Edges @@ -3311,15 +3053,9 @@ export type components = { * @description The id of the execution state */ id: string; - /** - * Graph - * @description The graph being executed - */ + /** @description The graph being executed */ graph: components["schemas"]["Graph"]; - /** - * Execution Graph - * @description The expanded graph of activated and executed nodes - */ + /** @description The expanded graph of activated and executed nodes */ execution_graph: components["schemas"]["Graph"]; /** * Executed @@ -3336,7 +3072,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | 
components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["String2Output"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"]; + [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | 
components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; }; /** * Errors @@ -3375,41 +3111,33 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Graph - * @description The graph to run - */ + use_cache?: boolean | null; + /** @description The graph to run */ graph?: components["schemas"]["Graph"]; /** - * Type + * type * @default graph - * @enum {string} + * @constant */ type: "graph"; }; - /** - * GraphInvocationOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** GraphInvocationOutput */ GraphInvocationOutput: { /** - * Type + * type * @default graph_output - * @enum {string} + * @constant */ type: "graph_output"; }; @@ -3433,29 +3161,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default hed_image_processor - * @enum {string} - */ - type: "hed_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -3474,23 +3193,20 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default hed_image_processor + * @constant + */ + type: "hed_image_processor"; }; /** IPAdapterField */ IPAdapterField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. */ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; - /** - * Image Encoder Model - * @description The name of the CLIP image encoder model. - */ + /** @description The name of the CLIP image encoder model. */ image_encoder_model: components["schemas"]["CLIPVisionModelField"]; /** * Weight @@ -3526,22 +3242,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. */ image?: components["schemas"]["ImageField"]; /** * IP-Adapter Model @@ -3567,23 +3280,17 @@ export type components = { */ end_step_percent?: number; /** - * Type + * type * @default ip_adapter - * @enum {string} + * @constant */ type: "ip_adapter"; }; /** IPAdapterMetadataField */ IPAdapterMetadataField: { - /** - * Image - * @description The IP-Adapter image prompt. - */ + /** @description The IP-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * Ip Adapter Model - * @description The IP-Adapter model to use. - */ + /** @description The IP-Adapter model to use. */ ip_adapter_model: components["schemas"]["IPAdapterModelField"]; /** * Weight @@ -3620,26 +3327,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default ip_adapter + * @constant */ model_type: "ip_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "invokeai"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; - /** - * IPAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** IPAdapterOutput */ IPAdapterOutput: { /** * IP-Adapter @@ -3647,9 +3350,9 @@ export type components = { */ ip_adapter: components["schemas"]["IPAdapterField"]; /** - * Type + * type * @default ip_adapter_output - * @enum {string} + * @constant */ type: "ip_adapter_output"; }; @@ -3668,22 +3371,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to blur - */ + use_cache?: boolean | null; + /** @description The image to blur */ image?: components["schemas"]["ImageField"]; /** * Radius @@ -3699,9 +3399,9 @@ export type components = { */ blur_type?: "gaussian" | "box"; /** - * Type + * type * @default img_blur - * @enum {string} + * @constant */ type: "img_blur"; }; @@ -3732,22 +3432,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to get the channel from - */ + use_cache?: boolean | null; + /** @description The image to get the channel from */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3757,9 +3454,9 @@ export type components = { */ channel?: "A" | "R" | "G" | "B"; /** - * Type + * type * @default img_chan - * @enum {string} + * @constant */ type: "img_chan"; }; @@ -3778,22 +3475,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3814,9 +3508,9 @@ export type components = { */ invert_channel?: boolean; /** - * Type + * type * @default img_channel_multiply - * @enum {string} + * @constant */ type: "img_channel_multiply"; }; @@ -3835,22 +3529,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Channel @@ -3865,9 +3556,9 @@ export type components = { */ offset?: number; /** - * Type + * type * @default img_channel_offset - * @enum {string} + * @constant */ type: "img_channel_offset"; }; @@ -3886,27 +3577,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of image values */ collection?: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection - * @enum {string} + * @constant */ type: "image_collection"; }; @@ -3921,9 +3612,9 @@ export type components = { */ collection: components["schemas"]["ImageField"][]; /** - * Type + * type * @default image_collection_output - * @enum {string} + * @constant */ type: "image_collection_output"; }; @@ -3942,22 +3633,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to convert - */ + use_cache?: boolean | null; + /** @description The image to convert */ image?: components["schemas"]["ImageField"]; /** * Mode @@ -3967,9 +3655,9 @@ export type components = { */ mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; /** - * Type + * type * @default img_conv - * @enum {string} + * @constant */ type: "img_conv"; }; @@ -3988,22 +3676,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to crop - */ + use_cache?: boolean | null; + /** @description The image to crop */ image?: components["schemas"]["ImageField"]; /** * X @@ -4030,9 +3715,9 @@ export type components = { */ height?: number; /** - * Type + * type * @default img_crop - * @enum {string} + * @constant */ type: "img_crop"; }; @@ -4084,7 +3769,7 @@ export type components = { * Deleted At * @description The deleted timestamp of the image. */ - deleted_at?: string; + deleted_at?: string | null; /** * Is Intermediate * @description Whether this is an intermediate image. @@ -4094,12 +3779,12 @@ export type components = { * Session Id * @description The session ID that generated this image, if it is a generated image. 
*/ - session_id?: string; + session_id?: string | null; /** * Node Id * @description The node ID that generated this image, if it is a generated image. */ - node_id?: string; + node_id?: string | null; /** * Starred * @description Whether this image is starred. @@ -4109,7 +3794,7 @@ export type components = { * Board Id * @description The id of the board the image belongs to, if one exists. */ - board_id?: string; + board_id?: string | null; }; /** * ImageField @@ -4137,22 +3822,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to adjust - */ + use_cache?: boolean | null; + /** @description The image to adjust */ image?: components["schemas"]["ImageField"]; /** * Hue @@ -4161,9 +3843,9 @@ export type components = { */ hue?: number; /** - * Type + * type * @default img_hue_adjust - * @enum {string} + * @constant */ type: "img_hue_adjust"; }; @@ -4182,22 +3864,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4212,9 +3891,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_ilerp - * @enum {string} + * @constant */ type: "img_ilerp"; }; @@ -4233,27 +3912,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to load - */ + use_cache?: boolean | null; + /** @description The image to load */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default image - * @enum {string} + * @constant */ type: "image"; }; @@ -4272,22 +3948,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to lerp - */ + use_cache?: boolean | null; + /** @description The image to lerp */ image?: components["schemas"]["ImageField"]; /** * Min @@ -4302,9 +3975,9 @@ export type components = { */ max?: number; /** - * Type + * type * @default img_lerp - * @enum {string} + * @constant */ type: "img_lerp"; }; @@ -4317,12 +3990,12 @@ export type components = { * Metadata * @description The image's core metadata, if it was created in the Linear or Canvas UI */ - metadata?: Record; + metadata?: Record | null; /** * Graph * @description The graph that created the image */ - graph?: Record; + graph?: Record | null; }; /** * Multiply Images @@ -4339,32 +4012,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image1 - * @description The first image to multiply - */ + use_cache?: boolean | null; + /** @description The first image to multiply */ image1?: components["schemas"]["ImageField"]; - /** - * Image2 - * @description The second image to multiply - */ + /** @description The second image to multiply */ image2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default img_mul - * @enum {string} + * @constant */ type: "img_mul"; }; @@ -4383,44 +4050,35 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description The image to check */ + image?: components["schemas"]["ImageField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_nsfw - * @enum {string} + * @constant */ type: "img_nsfw"; - /** - * Image - * @description The image to check - */ - image?: components["schemas"]["ImageField"]; }; /** * ImageOutput * @description Base class for nodes that output a single image */ ImageOutput: { - /** - * Image - * @description The output image - */ + /** @description The output image */ image: components["schemas"]["ImageField"]; /** * Width @@ -4433,9 +4091,9 @@ export type components = { */ height: number; /** - * Type + * type * @default image_output - * @enum {string} + * @constant */ type: "image_output"; }; @@ -4454,33 +4112,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Base Image - * @description The base image - */ + use_cache?: boolean | null; + /** @description The base image */ base_image?: components["schemas"]["ImageField"]; - /** - * Image - * @description The image to paste - */ + /** @description The image to paste */ image?: components["schemas"]["ImageField"]; - /** - * Mask - * @description The mask to use when pasting - */ - mask?: components["schemas"]["ImageField"]; + /** @description The mask to use when pasting */ + mask?: components["schemas"]["ImageField"] | null; /** * X * @description The left x coordinate at which to paste the image @@ -4500,51 +4149,12 @@ export type components = { */ crop?: boolean; /** - * Type + * type * @default img_paste - * @enum {string} + * @constant */ type: "img_paste"; }; - /** - * Base Image Processor - * @description Base class for invocations that preprocess images for ControlNet - */ - ImageProcessorInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Workflow - * @description The workflow to save with the image - */ - workflow?: string; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ - image?: components["schemas"]["ImageField"]; - /** - * Type - * @default image_processor - * @enum {string} - */ - type: "image_processor"; - }; /** * ImageRecordChanges * @description A set of changes to apply to an image record. @@ -4557,22 +4167,23 @@ export type components = { */ ImageRecordChanges: { /** @description The image's new category. */ - image_category?: components["schemas"]["ImageCategory"]; + image_category?: components["schemas"]["ImageCategory"] | null; /** * Session Id * @description The image's new session ID. */ - session_id?: string; + session_id?: string | null; /** * Is Intermediate * @description The image's new `is_intermediate` flag. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Starred * @description The image's new `starred` state */ - starred?: boolean; + starred?: boolean | null; + [key: string]: unknown; }; /** * Resize Image @@ -4589,22 +4200,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to resize - */ + use_cache?: boolean | null; + /** @description The image to resize */ image?: components["schemas"]["ImageField"]; /** * Width @@ -4625,15 +4233,12 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_resize - * @enum {string} + * @constant */ type: "img_resize"; }; @@ -4652,22 +4257,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to scale - */ + use_cache?: boolean | null; + /** @description The image to scale */ image?: components["schemas"]["ImageField"]; /** * Scale Factor @@ -4683,9 +4285,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default img_scale - * @enum {string} + * @constant */ type: "img_scale"; }; @@ -4704,27 +4306,21 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to encode - */ + use_cache?: boolean | null; + /** @description The image to encode */ image?: components["schemas"]["ImageField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; /** * Tiled @@ -4735,13 +4331,13 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; /** - * Type + * type * @default i2l - * @enum {string} + * @constant */ type: "i2l"; }; @@ -4781,22 +4377,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to check - */ + use_cache?: boolean | null; + /** @description The image to check */ image?: components["schemas"]["ImageField"]; /** * Text @@ -4804,15 +4397,12 @@ export type components = { * @default InvokeAI */ text?: string; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default img_watermark - * @enum {string} + * @constant */ type: "img_watermark"; }; @@ -4822,7 +4412,7 @@ export type components = { * 
Response * @description If defined, the message to display to the user when images begin downloading */ - response?: string; + response: string | null; }; /** ImagesUpdatedFromListResult */ ImagesUpdatedFromListResult: { @@ -4847,38 +4437,34 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Color * @description The color to use to infill * @default { - * "r": 127, - * "g": 127, + * "a": 255, * "b": 127, - * "a": 255 + * "g": 127, + * "r": 127 * } */ color?: components["schemas"]["ColorField"]; /** - * Type + * type * @default infill_rgba - * @enum {string} + * @constant */ type: "infill_rgba"; }; @@ -4897,22 +4483,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Downscale @@ -4928,9 +4511,9 @@ export type components = { */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; /** - * Type + * type * @default infill_patchmatch - * @enum {string} + * @constant */ type: "infill_patchmatch"; }; @@ -4949,22 +4532,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** * Tile Size @@ -4978,9 +4558,9 @@ export type components = { */ seed?: number; /** - * Type + * type * @default infill_tile - * @enum {string} + * @constant */ type: "infill_tile"; }; @@ -4999,27 +4579,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of integer values */ collection?: number[]; /** - * Type + * type * @default integer_collection - * @enum {string} + * @constant */ type: "integer_collection"; }; @@ -5034,9 +4614,9 @@ export type components = { */ collection: number[]; /** - * Type + * type * @default integer_collection_output - * @enum {string} + * @constant */ type: "integer_collection_output"; }; @@ -5055,18 +4635,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The integer value @@ -5074,9 +4654,9 @@ export type components = { */ value?: number; /** - * Type + * type * @default integer - * @enum {string} + * @constant */ type: "integer"; }; @@ -5095,18 +4675,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Operation * @description The operation to perform @@ -5127,9 +4707,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default integer_math - * @enum {string} + * @constant */ type: "integer_math"; }; @@ -5144,9 +4724,9 @@ export type components = { */ value: number; /** - * Type + * type * @default integer_output - * @enum {string} + * @constant */ type: "integer_output"; }; @@ -5193,18 +4773,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The list of items to iterate over @@ -5217,9 +4797,9 @@ export type components = { */ index?: number; /** - * Type + * type * @default iterate - * @enum {string} + * @constant */ type: "iterate"; }; @@ -5232,11 +4812,11 @@ export type components = { * Collection Item * @description The item being iterated over */ - item?: unknown; + item: unknown; /** - * Type + * type * @default iterate_output - * @enum {string} + * @constant */ type: "iterate_output"; }; @@ -5255,27 +4835,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to infill - */ + use_cache?: boolean | null; + /** @description The image to infill */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default infill_lama - * @enum {string} + * @constant */ type: "infill_lama"; }; @@ -5294,27 +4871,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of latents tensors */ collection?: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection - * @enum {string} + * @constant */ type: "latents_collection"; }; @@ -5329,9 +4906,9 @@ export type components = { */ collection: components["schemas"]["LatentsField"][]; /** - * Type + * type * @default latents_collection_output - * @enum {string} + * @constant */ type: "latents_collection_output"; }; @@ -5349,7 +4926,7 @@ export type components = { * Seed * @description Seed used to generate this latents */ - seed?: number; + seed?: number | null; }; /** * Latents Primitive @@ -5366,27 +4943,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description The latents tensor - */ + use_cache?: boolean | null; + /** @description The latents tensor */ latents?: components["schemas"]["LatentsField"]; /** - * Type + * type * @default latents - * @enum {string} + * @constant */ type: "latents"; }; @@ -5395,10 +4969,7 @@ export type components = { * @description Base class for nodes that output a single latents tensor */ LatentsOutput: { - /** - * Latents - * @description Latents tensor - */ + /** @description Latents tensor */ latents: components["schemas"]["LatentsField"]; /** * Width @@ -5411,9 +4982,9 @@ export type components = { */ height: number; /** - * Type + * type * @default latents_output - * @enum {string} + * @constant */ type: "latents_output"; }; @@ -5432,18 +5003,22 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; + /** @description Latents tensor */ + latents?: components["schemas"]["LatentsField"]; + /** @description VAE */ + vae?: components["schemas"]["VaeField"]; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -5453,30 +5028,17 @@ export type components = { /** * Fp32 * @description Whether or not to use full float32 precision - * @default true + * @default false */ fp32?: boolean; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i - * @enum {string} + * @constant */ type: "l2i"; - /** - * Latents - * @description Latents tensor - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ - vae?: components["schemas"]["VaeField"]; }; /** * Leres (Depth) Processor @@ -5493,29 +5055,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default leres_image_processor - * @enum {string} - */ - type: "leres_image_processor"; /** * Thr A * @description Leres parameter `thr_a` @@ -5546,6 +5099,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default leres_image_processor + * @constant + */ + type: "leres_image_processor"; }; /** * Lineart Anime Processor @@ -5562,29 +5121,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_anime_image_processor - * @enum {string} - */ - type: "lineart_anime_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5597,6 +5147,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default lineart_anime_image_processor + * @constant + */ + type: "lineart_anime_image_processor"; }; /** * Lineart Processor @@ -5613,29 +5169,20 @@ export type components = { * @description Whether or not this is an 
intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default lineart_image_processor - * @enum {string} - */ - type: "lineart_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -5654,16 +5201,19 @@ export type components = { * @default false */ coarse?: boolean; + /** + * type + * @default lineart_image_processor + * @constant + */ + type: "lineart_image_processor"; }; /** * LoRAMetadataField * @description LoRA metadata for an image generated in InvokeAI. */ LoRAMetadataField: { - /** - * Lora - * @description The LoRA model - */ + /** @description The LoRA model */ lora: components["schemas"]["LoRAModelField"]; /** * Weight @@ -5678,15 +5228,16 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default lora + * @constant */ model_type: "lora"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["LoRAModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * LoRAModelField @@ -5703,13 +5254,11 @@ export type components = { }; /** * LoRAModelFormat - * @description An enumeration. * @enum {string} */ LoRAModelFormat: "lycoris" | "diffusers"; /** * LogLevel - * @description An enumeration. 
* @enum {integer} */ LogLevel: 0 | 10 | 20 | 30 | 40 | 50; @@ -5725,7 +5274,7 @@ export type components = { /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; /** * Weight * @description Lora's weight which to use when apply to model @@ -5747,18 +5296,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load @@ -5774,16 +5323,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader - * @enum {string} + * @constant */ type: "lora_loader"; }; @@ -5796,16 +5345,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default lora_loader_output - * @enum {string} + * @constant */ type: "lora_loader_output"; }; @@ -5839,27 +5388,24 @@ export type components = { * @description Whether or 
not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default main_model_loader - * @enum {string} + * @constant */ type: "main_model_loader"; }; @@ -5878,32 +5424,26 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Mask1 - * @description The first mask to combine - */ + use_cache?: boolean | null; + /** @description The first mask to combine */ mask1?: components["schemas"]["ImageField"]; - /** - * Mask2 - * @description The second image to combine - */ + /** @description The second image to combine */ mask2?: components["schemas"]["ImageField"]; /** - * Type + * type * @default mask_combine - * @enum {string} + * @constant */ type: "mask_combine"; }; @@ -5922,22 +5462,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to apply the mask to - */ + use_cache?: boolean | null; + /** @description The image to apply the mask to */ image?: components["schemas"]["ImageField"]; /** * Edge Size @@ -5960,9 +5497,9 @@ export type components = { */ high_threshold?: number; /** - * Type + * type * @default mask_edge - * @enum {string} + * @constant */ type: "mask_edge"; }; @@ -5981,22 +5518,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to create the mask from - */ + use_cache?: boolean | null; + /** @description The image to create the mask from */ image?: components["schemas"]["ImageField"]; /** * Invert @@ -6005,9 +5539,9 @@ export type components = { */ invert?: boolean; /** - * Type + * type * @default tomask - * @enum {string} + * @constant */ type: "tomask"; }; @@ -6026,29 +5560,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mediapipe_face_processor - * @enum {string} - */ - type: "mediapipe_face_processor"; /** * Max Faces * @description Maximum number of faces to detect @@ -6061,13 +5586,50 @@ export type components = { * @default 0.5 */ min_confidence?: number; + /** + * type + * @default mediapipe_face_processor + * @constant + */ + type: "mediapipe_face_processor"; }; /** * MergeInterpolationMethod - * @description An enumeration. * @enum {string} */ MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; + /** MergeModelsBody */ + MergeModelsBody: { + /** + * Model Names + * @description model name + */ + model_names: string[]; + /** + * Merged Model Name + * @description Name of destination model + */ + merged_model_name: string | null; + /** + * Alpha + * @description Alpha weighting strength to apply to 2d and 3d models + * @default 0.5 + */ + alpha?: number | null; + /** @description Interpolation method */ + interp: components["schemas"]["MergeInterpolationMethod"] | null; + /** + * Force + * @description Force merging of models created with different versions of diffusers + * @default false + */ + force?: boolean | null; + /** + * Merge Dest Directory + * @description Save the merged model to the designated directory (with 'merged_model_name' appended) + */ + merge_dest_directory?: string | null; + }; /** * Metadata Accumulator * @description Outputs a Core Metadata Object @@ -6083,177 +5645,168 @@ export type components = { * 
@description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Generation Mode * @description The generation mode that output this image */ - generation_mode?: string; + generation_mode?: string | null; /** * Positive Prompt * @description The positive prompt parameter */ - positive_prompt?: string; + positive_prompt?: string | null; /** * Negative Prompt * @description The negative prompt parameter */ - negative_prompt?: string; + negative_prompt?: string | null; /** * Width * @description The width parameter */ - width?: number; + width?: number | null; /** * Height * @description The height parameter */ - height?: number; + height?: number | null; /** * Seed * @description The seed used for noise generation */ - seed?: number; + seed?: number | null; /** * Rand Device * @description The device used for random number generation */ - rand_device?: string; + rand_device?: string | null; /** * Cfg Scale * @description The classifier-free guidance scale parameter */ - cfg_scale?: number; + cfg_scale?: number | null; /** * Steps * @description The number of steps used for inference */ - steps?: number; + steps?: number | null; /** * Scheduler * @description The scheduler used for inference */ - scheduler?: string; + scheduler?: string | null; /** * Clip Skip * @description The number of skipped CLIP layers */ - clip_skip?: number; - /** - * Model - * @description The main model used for inference - */ - model?: components["schemas"]["MainModelField"]; + clip_skip?: number | null; + /** @description The main model used for inference */ + model?: components["schemas"]["MainModelField"] | null; /** * Controlnets * @description The 
ControlNets used for inference */ - controlnets?: components["schemas"]["ControlField"][]; + controlnets?: components["schemas"]["ControlField"][] | null; /** * Ipadapters * @description The IP Adapters used for inference */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][]; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** * T2Iadapters * @description The IP Adapters used for inference */ - t2iAdapters?: components["schemas"]["T2IAdapterField"][]; + t2iAdapters?: components["schemas"]["T2IAdapterField"][] | null; /** * Loras * @description The LoRAs used for inference */ - loras?: components["schemas"]["LoRAMetadataField"][]; + loras?: components["schemas"]["LoRAMetadataField"][] | null; /** * Strength * @description The strength used for latents-to-latents */ - strength?: number; + strength?: number | null; /** * Init Image * @description The name of the initial image */ - init_image?: string; - /** - * Vae - * @description The VAE used for decoding, if the main model's default was not used - */ - vae?: components["schemas"]["VAEModelField"]; + init_image?: string | null; + /** @description The VAE used for decoding, if the main model's default was not used */ + vae?: components["schemas"]["VAEModelField"] | null; /** * Hrf Width * @description The high resolution fix height and width multipler. */ - hrf_width?: number; + hrf_width?: number | null; /** * Hrf Height * @description The high resolution fix height and width multipler. */ - hrf_height?: number; + hrf_height?: number | null; /** * Hrf Strength * @description The high resolution fix img2img strength used in the upscale pass. 
*/ - hrf_strength?: number; + hrf_strength?: number | null; /** * Positive Style Prompt * @description The positive style prompt parameter */ - positive_style_prompt?: string; + positive_style_prompt?: string | null; /** * Negative Style Prompt * @description The negative style prompt parameter */ - negative_style_prompt?: string; - /** - * Refiner Model - * @description The SDXL Refiner model used - */ - refiner_model?: components["schemas"]["MainModelField"]; + negative_style_prompt?: string | null; + /** @description The SDXL Refiner model used */ + refiner_model?: components["schemas"]["MainModelField"] | null; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner */ - refiner_cfg_scale?: number; + refiner_cfg_scale?: number | null; /** * Refiner Steps * @description The number of steps used for the refiner */ - refiner_steps?: number; + refiner_steps?: number | null; /** * Refiner Scheduler * @description The scheduler used for the refiner */ - refiner_scheduler?: string; + refiner_scheduler?: string | null; /** * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_positive_aesthetic_score?: number; + refiner_positive_aesthetic_score?: number | null; /** * Refiner Negative Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_negative_aesthetic_score?: number; + refiner_negative_aesthetic_score?: number | null; /** * Refiner Start * @description The start value used for refiner denoising */ - refiner_start?: number; + refiner_start?: number | null; /** - * Type + * type * @default metadata_accumulator - * @enum {string} + * @constant */ type: "metadata_accumulator"; }; @@ -6262,15 +5815,12 @@ export type components = { * @description The output of the MetadataAccumulator node */ MetadataAccumulatorOutput: { - /** - * Metadata - * @description The core metadata for the image - */ + /** @description The core metadata for the image 
*/ metadata: components["schemas"]["CoreMetadata"]; /** - * Type + * type * @default metadata_accumulator_output - * @enum {string} + * @constant */ type: "metadata_accumulator_output"; }; @@ -6289,29 +5839,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default midas_depth_image_processor - * @enum {string} - */ - type: "midas_depth_image_processor"; /** * A Mult * @description Midas parameter `a_mult` (a = a_mult * PI) @@ -6324,6 +5865,12 @@ export type components = { * @default 0.1 */ bg_th?: number; + /** + * type + * @default midas_depth_image_processor + * @constant + */ + type: "midas_depth_image_processor"; }; /** * MLSD Processor @@ -6340,29 +5887,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default mlsd_image_processor - * @enum {string} - */ - type: "mlsd_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6387,11 +5925,16 @@ export type components = { * @default 0.1 */ thr_d?: number; + /** + * type + * @default mlsd_image_processor + * @constant + */ + type: "mlsd_image_processor"; }; /** * ModelError - * @description An enumeration. - * @enum {string} + * @constant */ ModelError: "not_found"; /** ModelInfo */ @@ -6406,7 +5949,7 @@ export type components = { /** @description Info to load submodel */ model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components["schemas"]["SubModelType"]; + submodel?: components["schemas"]["SubModelType"] | null; }; /** * ModelLoaderOutput @@ -6429,21 +5972,19 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output - * @enum {string} + * @constant */ type: "model_loader_output"; }; /** * ModelType - * @description An enumeration. * @enum {string} */ ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter"; /** * ModelVariantType - * @description An enumeration. * @enum {string} */ ModelVariantType: "normal" | "inpaint" | "depth"; @@ -6467,18 +6008,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -6492,9 +6033,9 @@ export type components = { */ b?: number; /** - * Type + * type * @default mul - * @enum {string} + * @constant */ type: "mul"; }; @@ -6531,18 +6072,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Seed * @description Seed for random number generation @@ -6567,9 +6108,9 @@ export type components = { */ use_cpu?: boolean; /** - * Type + * type * @default noise - * @enum {string} + * @constant */ type: "noise"; }; @@ -6578,11 +6119,8 @@ export type components = { * @description Invocation noise output */ NoiseOutput: { - /** - * Noise - * @description Noise tensor - */ - noise?: components["schemas"]["LatentsField"]; + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -6594,9 +6132,9 @@ export type components = { */ height: number; /** - * Type + * type * @default noise_output - * @enum {string} + * @constant */ type: "noise_output"; }; @@ -6615,29 +6153,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default normalbae_image_processor - * @enum {string} - */ - type: "normalbae_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -6650,6 +6179,12 @@ export type components = { * @default 512 */ image_resolution?: number; + /** + * type + * @default normalbae_image_processor + * @constant + */ + type: "normalbae_image_processor"; }; /** * ONNX Latents to Image @@ -6666,37 +6201,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Denoised latents tensor - */ + use_cache?: boolean | null; + /** @description Denoised latents tensor */ latents?: components["schemas"]["LatentsField"]; - /** - * Vae - * @description VAE - */ + /** @description VAE */ vae?: components["schemas"]["VaeField"]; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default l2i_onnx - * @enum {string} + * @constant */ type: "l2i_onnx"; }; @@ -6726,19 +6252,13 @@ export type components = { */ vae_encoder?: components["schemas"]["VaeField"]; /** - * Type + * type * @default model_loader_output_onnx - * @enum {string} + * @constant */ type: "model_loader_output_onnx"; }; - /** - * ONNX Prompt (Raw) - * @description A node to process inputs and produce outputs. - * May use dependency injection in __init__ to receive providers. - * - * All invocations must use the `@invocation` decorator to provide their unique type. - */ + /** ONNX Prompt (Raw) */ ONNXPromptInvocation: { /** * Id @@ -6750,33 +6270,30 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Raw prompt text (no parsing) * @default */ prompt?: string; - /** - * Clip - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip?: components["schemas"]["ClipField"]; /** - * Type + * type * @default prompt_onnx - * @enum {string} + * @constant */ type: "prompt_onnx"; }; @@ -6787,19 +6304,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; }; /** ONNXStableDiffusion2ModelConfig */ @@ -6809,19 +6327,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default onnx + * @constant */ model_type: "onnx"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "onnx"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; variant: components["schemas"]["ModelVariantType"]; prediction_type: components["schemas"]["SchedulerPredictionType"]; /** Upcast Attention */ @@ -6842,32 +6361,23 @@ export type components = { * 
@description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Positive Conditioning - * @description Positive conditioning tensor - */ + use_cache?: boolean | null; + /** @description Positive conditioning tensor */ positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning tensor - */ + /** @description Negative conditioning tensor */ negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description Noise tensor - */ + /** @description Noise tensor */ noise?: components["schemas"]["LatentsField"]; /** * Steps @@ -6895,10 +6405,7 @@ export type components = { * @enum {string} */ precision?: "tensor(bool)" | "tensor(int8)" | "tensor(uint8)" | "tensor(int16)" | "tensor(uint16)" | "tensor(int32)" | "tensor(uint32)" | "tensor(int64)" | "tensor(uint64)" | "tensor(float16)" | "tensor(float)" | "tensor(double)"; - /** - * Unet - * @description UNet (scheduler, LoRAs) - */ + /** @description UNet (scheduler, LoRAs) */ unet?: components["schemas"]["UNetField"]; /** * Control @@ -6906,17 +6413,13 @@ export type components = { */ control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; /** - * Type + * type * @default t2l_onnx - * @enum {string} + * @constant */ type: "t2l_onnx"; }; - /** - * OffsetPaginatedResults[BoardDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[BoardDTO] */ OffsetPaginatedResults_BoardDTO_: { /** * Limit @@ -6939,11 +6442,7 @@ export type components = { */ items: components["schemas"]["BoardDTO"][]; }; - /** - * 
OffsetPaginatedResults[ImageDTO] - * @description Offset-paginated results - * Generic must be a Pydantic model - */ + /** OffsetPaginatedResults[ImageDTO] */ OffsetPaginatedResults_ImageDTO_: { /** * Limit @@ -6996,27 +6495,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description ONNX Main model (UNet, VAE, CLIP) to load - */ + use_cache?: boolean | null; + /** @description ONNX Main model (UNet, VAE, CLIP) to load */ model: components["schemas"]["OnnxModelField"]; /** - * Type + * type * @default onnx_model_loader - * @enum {string} + * @constant */ type: "onnx_model_loader"; }; @@ -7035,29 +6531,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default openpose_image_processor - * @enum {string} - */ - type: "openpose_image_processor"; /** * Hand And Face * @description Whether to use hands and face mode @@ -7076,38 +6563,12 @@ export type components = { * @default 512 */ image_resolution?: number; - }; - /** - * PaginatedResults[GraphExecutionState] - * @description Paginated results - * Generic must be a Pydantic model - */ - PaginatedResults_GraphExecutionState_: { /** - * Page - * @description Current Page + * type + * @default openpose_image_processor + * @constant */ - page: number; - /** - * Pages - * @description Total number of pages - */ - pages: number; - /** - * Per Page - * @description Number of items per page - */ - per_page: number; - /** - * Total - * @description Total number of items in result - */ - total: number; - /** - * Items - * @description Items - */ - items: components["schemas"]["GraphExecutionState"][]; + type: "openpose_image_processor"; }; /** * PIDI Processor @@ -7124,29 +6585,20 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default pidi_image_processor - * @enum {string} - */ - type: "pidi_image_processor"; /** * Detect Resolution * @description Pixel resolution for detection @@ -7171,6 +6623,12 @@ export type components = { * @default false */ scribble?: boolean; + /** + * type + * @default pidi_image_processor + * @constant + */ + type: "pidi_image_processor"; }; /** * Prompts from File @@ -7187,18 +6645,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * File Path * @description Path to prompt text file @@ -7208,12 +6666,12 @@ export type components = { * Pre Prompt * @description String to prepend to each prompt */ - pre_prompt?: string; + pre_prompt?: string | null; /** * Post Prompt * @description String to append to each prompt */ - post_prompt?: string; + post_prompt?: string | null; /** * Start Line * @description Line in the file to start start from @@ -7227,9 +6685,9 @@ export type components = { */ max_prompts?: number; /** - * Type + * type * @default prompt_from_file - * @enum {string} + * @constant */ type: "prompt_from_file"; }; @@ -7259,18 +6717,18 @@ export type components = { * 
@description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7290,9 +6748,9 @@ export type components = { */ decimals?: number; /** - * Type + * type * @default rand_float - * @enum {string} + * @constant */ type: "rand_float"; }; @@ -7311,18 +6769,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7336,9 +6794,9 @@ export type components = { */ high?: number; /** - * Type + * type * @default rand_int - * @enum {string} + * @constant */ type: "rand_int"; }; @@ -7357,18 +6815,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Low * @description The inclusive low value @@ -7393,9 +6851,9 @@ export type components = { */ seed?: number; /** - * Type + * type * @default random_range - * @enum {string} + * @constant */ type: "random_range"; }; @@ -7414,18 +6872,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7445,9 +6903,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range - * @enum {string} + * @constant */ type: "range"; }; @@ -7466,18 +6924,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Start * @description The start of the range @@ -7497,9 +6955,9 @@ export type components = { */ step?: number; /** - * Type + * type * @default range_of_size - * @enum {string} + * @constant */ type: "range_of_size"; }; @@ -7526,22 +6984,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Width @@ -7567,9 +7022,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lresize - * @enum {string} + * @constant */ type: "lresize"; }; @@ -7598,18 +7053,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The float value @@ -7623,9 +7078,9 @@ export type components = { */ decimals?: number; /** - * Type + * type * @default round_float - * @enum {string} + * @constant */ type: "round_float"; }; @@ -7644,18 +7099,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7709,9 +7164,9 @@ export type components = { */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_compel_prompt"; }; @@ -7730,18 +7185,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * LoRA * @description LoRA model to load @@ -7757,21 +7212,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader - * @enum {string} + * @constant */ type: "sdxl_lora_loader"; }; @@ -7784,21 +7239,21 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components["schemas"]["ClipField"]; + clip?: components["schemas"]["ClipField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components["schemas"]["ClipField"]; + clip2?: components["schemas"]["ClipField"] | null; /** - * Type + * type * @default sdxl_lora_loader_output - * @enum {string} + * @constant */ type: "sdxl_lora_loader_output"; }; @@ -7817,27 +7272,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_model_loader - * @enum {string} + * @constant */ type: "sdxl_model_loader"; }; @@ -7867,9 +7319,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_model_loader_output"; }; @@ -7888,18 +7340,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -7932,15 +7384,12 @@ export type components = { * @default 6 */ aesthetic_score?: number; - /** - * Clip2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ + /** @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ clip2?: components["schemas"]["ClipField"]; /** - * Type + * type * @default sdxl_refiner_compel_prompt - * @enum {string} + * @constant */ type: "sdxl_refiner_compel_prompt"; }; @@ -7959,27 +7408,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Model - * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load - */ + use_cache?: boolean | null; + /** @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ model: components["schemas"]["MainModelField"]; /** - * Type + * type * @default sdxl_refiner_model_loader - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader"; }; @@ -8004,9 +7450,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default sdxl_refiner_model_loader_output - * @enum {string} + * @constant */ type: "sdxl_refiner_model_loader_output"; }; @@ -8025,37 +7471,28 @@ export type components = { * @description Whether or 
not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default false */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; + /** @description The board to save the image to */ + board?: components["schemas"]["BoardField"] | null; + /** @description Optional core metadata to be written to image */ + metadata?: components["schemas"]["CoreMetadata"] | null; /** - * Board - * @description The board to save the image to - */ - board?: components["schemas"]["BoardField"]; - /** - * Metadata - * @description Optional core metadata to be written to image - */ - metadata?: components["schemas"]["CoreMetadata"]; - /** - * Type + * type * @default save_image - * @enum {string} + * @constant */ type: "save_image"; }; @@ -8074,22 +7511,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Latents - * @description Latents tensor - */ + use_cache?: boolean | null; + /** @description Latents tensor */ latents?: components["schemas"]["LatentsField"]; /** * Scale Factor @@ -8110,9 +7544,9 @@ export type components = { */ antialias?: boolean; /** - * Type + * type * @default lscale - * @enum {string} + * @constant */ type: "lscale"; }; @@ -8131,18 +7565,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Scheduler * @description Scheduler to use during inference @@ -8151,18 +7585,13 @@ export type components = { */ scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler - * @enum {string} + * @constant */ type: "scheduler"; }; - /** - * SchedulerOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** SchedulerOutput */ SchedulerOutput: { /** * Scheduler @@ -8171,15 +7600,14 @@ export type components = { */ scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** - * Type + * type * @default scheduler_output - * @enum {string} + * @constant */ type: "scheduler_output"; }; /** * SchedulerPredictionType - * @description An enumeration. * @enum {string} */ SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; @@ -8198,28 +7626,28 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE model to load */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** * Seamless Y * @description Specify whether Y axis is seamless @@ -8233,9 +7661,9 @@ export type components = { */ seamless_x?: boolean; /** - * Type + * type * @default seamless - * @enum {string} + * @constant */ type: "seamless"; }; @@ -8248,16 +7676,16 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * VAE * @description VAE */ - vae?: components["schemas"]["VaeField"]; + vae?: components["schemas"]["VaeField"] | null; /** - * Type + * type * @default 
seamless_output - * @enum {string} + * @constant */ type: "seamless_output"; }; @@ -8276,27 +7704,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default segment_anything_processor - * @enum {string} + * @constant */ type: "segment_anything_processor"; }; @@ -8321,10 +7746,7 @@ export type components = { queue: components["schemas"]["SessionQueueStatus"]; processor: components["schemas"]["SessionProcessorStatus"]; }; - /** - * SessionQueueItem - * @description Session queue item without the full graph. Used for serialization. 
- */ + /** SessionQueueItem */ SessionQueueItem: { /** * Item Id @@ -8358,7 +7780,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8373,12 +7795,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8388,17 +7810,11 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; - /** - * Session - * @description The fully-populated session to be executed - */ + field_values?: components["schemas"]["NodeFieldValue"][] | null; + /** @description The fully-populated session to be executed */ session: components["schemas"]["GraphExecutionState"]; }; - /** - * SessionQueueItemDTO - * @description Session queue item without the full graph. Used for serialization. 
- */ + /** SessionQueueItemDTO */ SessionQueueItemDTO: { /** * Item Id @@ -8432,7 +7848,7 @@ export type components = { * Error * @description The error message if this queue item errored */ - error?: string; + error?: string | null; /** * Created At * @description When this queue item was created @@ -8447,12 +7863,12 @@ export type components = { * Started At * @description When this queue item was started */ - started_at?: string; + started_at?: string | null; /** * Completed At * @description When this queue item was completed */ - completed_at?: string; + completed_at?: string | null; /** * Queue Id * @description The id of the queue with which this item is associated @@ -8462,7 +7878,7 @@ export type components = { * Field Values * @description The field values that were used for this queue item */ - field_values?: components["schemas"]["NodeFieldValue"][]; + field_values?: components["schemas"]["NodeFieldValue"][] | null; }; /** SessionQueueStatus */ SessionQueueStatus: { @@ -8475,17 +7891,17 @@ export type components = { * Item Id * @description The current queue item id */ - item_id?: number; + item_id: number | null; /** * Batch Id * @description The current queue item's batch id */ - batch_id?: string; + batch_id: string | null; /** * Session Id * @description The current queue item's session id */ - session_id?: string; + session_id: string | null; /** * Pending * @description Number of queue items with status 'pending' @@ -8532,27 +7948,24 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to show - */ + use_cache?: boolean | null; + /** @description The image to show */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default show_image - * @enum {string} + * @constant */ type: "show_image"; }; @@ -8563,21 +7976,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8589,21 +8003,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion2ModelCheckpointConfig */ @@ -8613,21 +8028,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ 
model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8639,21 +8055,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusionXLModelCheckpointConfig */ @@ -8663,21 +8080,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "checkpoint"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; /** Config */ config: string; variant: components["schemas"]["ModelVariantType"]; @@ -8689,21 +8107,22 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default main + * @constant */ model_type: "main"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum 
{string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; /** Vae */ - vae?: string; + vae?: string | null; variant: components["schemas"]["ModelVariantType"]; }; /** @@ -8721,18 +8140,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Easing * @description The easing function to use @@ -8774,12 +8193,12 @@ export type components = { * Pre Start Value * @description value before easing start */ - pre_start_value?: number; + pre_start_value?: number | null; /** * Post End Value * @description value after easing end */ - post_end_value?: number; + post_end_value?: number | null; /** * Mirror * @description include mirror of easing function @@ -8793,9 +8212,9 @@ export type components = { */ show_easing_plot?: boolean; /** - * Type + * type * @default step_param_easing - * @enum {string} + * @constant */ type: "step_param_easing"; }; @@ -8815,9 +8234,9 @@ export type components = { */ string_2: string; /** - * Type + * type * @default string_2_output - * @enum {string} + * @constant */ type: "string_2_output"; }; @@ -8836,27 +8255,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Collection * @description The collection of string values */ collection?: string[]; /** - * Type + * type * @default string_collection - * @enum {string} + * @constant */ type: "string_collection"; }; @@ -8871,9 +8290,9 @@ export type components = { */ collection: string[]; /** - * Type + * type * @default string_collection_output - * @enum {string} + * @constant */ type: "string_collection_output"; }; @@ -8892,18 +8311,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * Value * @description The string value @@ -8911,9 +8330,9 @@ export type components = { */ value?: string; /** - * Type + * type * @default string - * @enum {string} + * @constant */ type: "string"; }; @@ -8932,18 +8351,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -8957,9 +8376,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join - * @enum {string} + * @constant */ type: "string_join"; }; @@ -8978,18 +8397,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String Left * @description String Left @@ -9009,9 +8428,9 @@ export type components = { */ string_right?: string; /** - * Type + * type * @default string_join_three - * @enum {string} + * @constant */ type: "string_join_three"; }; @@ -9026,9 +8445,9 @@ export type components = { */ value: string; /** - * Type + * type * @default string_output - * @enum {string} + * @constant */ type: "string_output"; }; @@ -9048,9 +8467,9 @@ export type components = { */ negative_string: string; /** - * Type + * type * @default string_pos_neg_output - * @enum {string} + * @constant */ type: "string_pos_neg_output"; }; @@ -9069,18 +8488,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to work on @@ -9106,9 +8525,9 @@ export type components = { */ use_regex?: boolean; /** - * Type + * type * @default string_replace - * @enum {string} + * @constant */ type: "string_replace"; }; @@ -9127,18 +8546,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9152,9 +8571,9 @@ export type components = { */ delimiter?: string; /** - * Type + * type * @default string_split - * @enum {string} + * @constant */ type: "string_split"; }; @@ -9173,18 +8592,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * String * @description String to split @@ -9192,15 +8611,14 @@ export type components = { */ string?: string; /** - * Type + * type * @default string_split_neg - * @enum {string} + * @constant */ type: "string_split_neg"; }; /** * SubModelType - * @description An enumeration. 
* @enum {string} */ SubModelType: "unet" | "text_encoder" | "text_encoder_2" | "tokenizer" | "tokenizer_2" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; @@ -9219,18 +8637,18 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * A * @description The first number @@ -9244,23 +8662,17 @@ export type components = { */ b?: number; /** - * Type + * type * @default sub - * @enum {string} + * @constant */ type: "sub"; }; /** T2IAdapterField */ T2IAdapterField: { - /** - * Image - * @description The T2I-Adapter image prompt. - */ + /** @description The T2I-Adapter image prompt. */ image: components["schemas"]["ImageField"]; - /** - * T2I Adapter Model - * @description The T2I-Adapter model to use. - */ + /** @description The T2I-Adapter model to use. */ t2i_adapter_model: components["schemas"]["T2IAdapterModelField"]; /** * Weight @@ -9303,22 +8715,19 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The IP-Adapter image prompt. - */ + use_cache?: boolean | null; + /** @description The IP-Adapter image prompt. 
*/ image?: components["schemas"]["ImageField"]; /** * T2I-Adapter Model @@ -9351,9 +8760,9 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Type + * type * @default t2i_adapter - * @enum {string} + * @constant */ type: "t2i_adapter"; }; @@ -9364,19 +8773,20 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default t2i_adapter + * @constant */ model_type: "t2i_adapter"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** * Model Format - * @enum {string} + * @constant */ model_format: "diffusers"; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** T2IAdapterModelField */ T2IAdapterModelField: { @@ -9388,12 +8798,7 @@ export type components = { /** @description Base model */ base_model: components["schemas"]["BaseModelType"]; }; - /** - * T2IAdapterOutput - * @description Base class for all invocation outputs. - * - * All invocation outputs must use the `@invocation_output` decorator to provide their unique type. 
- */ + /** T2IAdapterOutput */ T2IAdapterOutput: { /** * T2I Adapter @@ -9401,9 +8806,9 @@ export type components = { */ t2i_adapter: components["schemas"]["T2IAdapterField"]; /** - * Type + * type * @default t2i_adapter_output - * @enum {string} + * @constant */ type: "t2i_adapter_output"; }; @@ -9414,16 +8819,17 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default embedding + * @constant */ model_type: "embedding"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; /** Model Format */ model_format: null; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * Tile Resample Processor @@ -9440,47 +8846,38 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; - /** - * Type - * @default tile_image_processor - * @enum {string} - */ - type: "tile_image_processor"; /** * Down Sampling Rate * @description Down sampling rate * @default 1 */ down_sampling_rate?: number; + /** + * type + * @default tile_image_processor + * @constant + */ + type: "tile_image_processor"; }; /** UNetField */ UNetField: { - /** - * Unet - * @description Info to load unet submodel - */ + /** @description Info to load unet submodel */ unet: components["schemas"]["ModelInfo"]; - /** - * Scheduler - * @description Info to load scheduler submodel - */ + /** @description Info to load scheduler submodel */ 
scheduler: components["schemas"]["ModelInfo"]; /** * Loras @@ -9521,10 +8918,7 @@ export type components = { }; /** VaeField */ VaeField: { - /** - * Vae - * @description Info to load vae submodel - */ + /** @description Info to load vae submodel */ vae: components["schemas"]["ModelInfo"]; /** * Seamless Axes @@ -9547,27 +8941,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. * @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; + use_cache?: boolean | null; /** * VAE * @description VAE model to load */ vae_model: components["schemas"]["VAEModelField"]; /** - * Type + * type * @default vae_loader - * @enum {string} + * @constant */ type: "vae_loader"; }; @@ -9582,9 +8976,9 @@ export type components = { */ vae: components["schemas"]["VaeField"]; /** - * Type + * type * @default vae_loader_output - * @enum {string} + * @constant */ type: "vae_loader_output"; }; @@ -9595,19 +8989,19 @@ export type components = { base_model: components["schemas"]["BaseModelType"]; /** * Model Type - * @enum {string} + * @default vae + * @constant */ model_type: "vae"; /** Path */ path: string; /** Description */ - description?: string; + description?: string | null; model_format: components["schemas"]["VaeModelFormat"]; - error?: components["schemas"]["ModelError"]; + error?: components["schemas"]["ModelError"] | null; }; /** * VaeModelFormat - * @description An enumeration. * @enum {string} */ VaeModelFormat: "checkpoint" | "diffusers"; @@ -9635,57 +9029,27 @@ export type components = { * @description Whether or not this is an intermediate invocation. 
* @default false */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** * Workflow * @description The workflow to save with the image */ - workflow?: string; + workflow?: string | null; /** * Use Cache * @description Whether or not to use the cache * @default true */ - use_cache?: boolean; - /** - * Image - * @description The image to process - */ + use_cache?: boolean | null; + /** @description The image to process */ image?: components["schemas"]["ImageField"]; /** - * Type + * type * @default zoe_depth_image_processor - * @enum {string} + * @constant */ type: "zoe_depth_image_processor"; }; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. - */ - UIConfigBase: { - /** - * Tags - * @description The node's tags - */ - tags?: string[]; - /** - * Title - * @description The node's display name - */ - title?: string; - /** - * Category - * @description The node's category - */ - category?: string; - /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". - */ - version?: string; - }; /** * Input * @description The type of input a field accepts. @@ -9695,6 +9059,42 @@ export type components = { * @enum {string} */ Input: "connection" | "direct" | "any"; + /** + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. + * @enum {string} + */ + UIComponent: "none" | "textarea" | "slider"; + /** + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. 
+ */ + UIConfigBase: { + /** + * Tags + * @description The node's tags + */ + tags: string[] | null; + /** + * Title + * @description The node's display name + * @default null + */ + title: string | null; + /** + * Category + * @description The node's category + * @default null + */ + category: string | null; + /** + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + * @default null + */ + version: string | null; + }; /** * UIType * @description Type hints for the UI. @@ -9702,12 +9102,6 @@ export type components = { * @enum {string} */ UIType: "boolean" | "ColorField" | "ConditioningField" | "ControlField" | "float" | "ImageField" | "integer" | "LatentsField" | "string" | "BooleanCollection" | "ColorCollection" | "ConditioningCollection" | "ControlCollection" | "FloatCollection" | "ImageCollection" | "IntegerCollection" | "LatentsCollection" | "StringCollection" | "BooleanPolymorphic" | "ColorPolymorphic" | "ConditioningPolymorphic" | "ControlPolymorphic" | "FloatPolymorphic" | "ImagePolymorphic" | "IntegerPolymorphic" | "LatentsPolymorphic" | "StringPolymorphic" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "enum" | "Scheduler" | "WorkflowField" | "IsIntermediate" | "MetadataField" | "BoardField"; - /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. 
- * @enum {string} - */ - UIComponent: "none" | "textarea" | "slider"; /** * _InputField * @description *DO NOT USE* @@ -9719,16 +9113,16 @@ export type components = { input: components["schemas"]["Input"]; /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; - ui_component?: components["schemas"]["UIComponent"]; + ui_type: components["schemas"]["UIType"] | null; + ui_component: components["schemas"]["UIComponent"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; /** Ui Choice Labels */ - ui_choice_labels?: { + ui_choice_labels: { [key: string]: string; - }; + } | null; /** Item Default */ - item_default?: unknown; + item_default: unknown; }; /** * _OutputField @@ -9740,10 +9134,46 @@ export type components = { _OutputField: { /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components["schemas"]["UIType"]; + ui_type: components["schemas"]["UIType"] | null; /** Ui Order */ - ui_order?: number; + ui_order: number | null; }; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * CLIPVisionModelFormat + * @description An enumeration. + * @enum {string} + */ + CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; /** * IPAdapterModelFormat * @description An enumeration. 
@@ -9756,42 +9186,6 @@ export type components = { * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * CLIPVisionModelFormat - * @description An enumeration. - * @enum {string} - */ - CLIPVisionModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9806,382 +9200,6 @@ export type external = Record; export type operations = { - /** - * List Sessions - * @deprecated - * @description Gets a list of sessions, optionally searching - */ - list_sessions: { - parameters: { - query?: { - /** @description The page of results to get */ - page?: number; - /** @description The number of results per page */ - per_page?: number; - /** @description The query string to search for */ - query?: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["PaginatedResults_GraphExecutionState_"]; - }; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Create Session - * @deprecated - * @description Creates a new session, optionally initializing it with an invocation graph - */ - create_session: { - parameters: { - query?: { - 
/** @description The id of the queue to associate the session with */ - queue_id?: string; - }; - }; - requestBody?: { - content: { - "application/json": components["schemas"]["Graph"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid json */ - 400: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Get Session - * @deprecated - * @description Gets a session - */ - get_session: { - parameters: { - path: { - /** @description The id of the session to get */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Node - * @deprecated - * @description Adds a node to the graph - */ - add_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] 
| components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": string; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Update Node - * @deprecated - * @description Updates a node in the graph and removes all linked edges - */ - update_node: { - parameters: { - path: { - /** 
@description The id of the session */ - session_id: string; - /** @description The path to the node in the graph */ - node_path: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | 
components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NoiseInvocation"] | 
components["schemas"]["FaceOffInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | 
components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Node - * @deprecated - * @description Deletes a node in the graph and removes all linked edges - */ - delete_node: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The path to the node to delete */ - node_path: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Add Edge - * @deprecated - * @description Adds an edge to the graph - */ - add_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Edge"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** 
@description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Delete Edge - * @deprecated - * @description Deletes an edge from the graph - */ - delete_edge: { - parameters: { - path: { - /** @description The id of the session */ - session_id: string; - /** @description The id of the node the edge is coming from */ - from_node_id: string; - /** @description The field of the node the edge is coming from */ - from_field: string; - /** @description The id of the node the edge is going to */ - to_node_id: string; - /** @description The field of the node the edge is going to */ - to_field: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Invalid node or link */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Invoke Session - * @deprecated - * @description Invokes a session - */ - invoke_session: { - parameters: { - query: { - /** @description The id of the queue to associate the session with */ - queue_id: string; - /** @description Whether or not to invoke all remaining invocations */ - all?: boolean; - }; - path: { - /** @description The id of the session to invoke */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is queued */ - 202: { - content: never; - }; - /** @description The session has no invocations ready to invoke */ - 400: { - content: never; - }; - /** @description Session not found */ - 404: { - content: 
never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - /** - * Cancel Session Invoke - * @deprecated - * @description Invokes a session - */ - cancel_session_invoke: { - parameters: { - path: { - /** @description The id of the session to cancel */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": unknown; - }; - }; - /** @description The invocation is canceled */ - 202: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Parse Dynamicprompts * @description Creates a batch process @@ -10215,9 +9233,9 @@ export type operations = { parameters: { query?: { /** @description Base models to include */ - base_models?: components["schemas"]["BaseModelType"][]; + base_models?: components["schemas"]["BaseModelType"][] | null; /** @description The type of model to get */ - model_type?: components["schemas"]["ModelType"]; + model_type?: components["schemas"]["ModelType"] | null; }; }; responses: { @@ -10400,7 +9418,7 @@ export type operations = { parameters: { query?: { /** @description Save the converted model to the designated directory */ - convert_dest_directory?: string; + convert_dest_directory?: string | null; }; path: { /** @description Base model */ @@ -10541,11 +9559,11 @@ export type operations = { /** @description Whether this is an intermediate image */ is_intermediate: boolean; /** @description The board to add this image to, if any */ - board_id?: string; + board_id?: string | null; /** @description The session ID associated with this upload, if any */ - session_id?: string; + session_id?: string | null; /** @description Whether to crop the image */ - crop_visible?: boolean; + crop_visible?: boolean | null; }; }; requestBody: { @@ -10664,7 +9682,7 @@ 
export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": number; }; }; }; @@ -10789,13 +9807,13 @@ export type operations = { parameters: { query?: { /** @description The origin of images to list. */ - image_origin?: components["schemas"]["ResourceOrigin"]; + image_origin?: components["schemas"]["ResourceOrigin"] | null; /** @description The categories of image to include. */ - categories?: components["schemas"]["ImageCategory"][]; + categories?: components["schemas"]["ImageCategory"][] | null; /** @description Whether to list intermediate images. */ - is_intermediate?: boolean; + is_intermediate?: boolean | null; /** @description The board id to filter by. Use 'none' to find images without a board. */ - board_id?: string; + board_id?: string | null; /** @description The page offset */ offset?: number; /** @description The number of images per page */ @@ -10913,11 +9931,11 @@ export type operations = { parameters: { query?: { /** @description Whether to list all boards */ - all?: boolean; + all?: boolean | null; /** @description The page offset */ - offset?: number; + offset?: number | null; /** @description The number of boards per page */ - limit?: number; + limit?: number | null; }; }; responses: { @@ -10995,7 +10013,7 @@ export type operations = { parameters: { query?: { /** @description Permanently delete all images on the board */ - include_images?: boolean; + include_images?: boolean | null; }; path: { /** @description The id of board to delete */ @@ -11311,7 +10329,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": components["schemas"]["EnqueueGraphResult"]; }; }; /** @description Created */ @@ -11348,7 +10366,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": unknown; + "application/json": 
components["schemas"]["EnqueueBatchResult"]; }; }; /** @description Created */ @@ -11375,9 +10393,9 @@ export type operations = { /** @description The number of items to fetch */ limit?: number; /** @description The status of items to fetch */ - status?: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + status?: ("pending" | "in_progress" | "completed" | "failed" | "canceled") | null; /** @description The pagination cursor */ - cursor?: number; + cursor?: number | null; /** @description The pagination cursor priority */ priority?: number; }; @@ -11551,7 +10569,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ @@ -11577,7 +10595,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - "application/json": components["schemas"]["SessionQueueItem"]; + "application/json": components["schemas"]["SessionQueueItem"] | null; }; }; /** @description Validation Error */ diff --git a/pyproject.toml b/pyproject.toml index bab87172c2..4c8ec0f5e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ "accelerate~=0.23.0", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel~=2.0.2", "controlnet-aux>=0.0.6", - "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 + "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # 
https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 @@ -48,19 +48,20 @@ dependencies = [ "easing-functions", "einops", "facexlib", - "fastapi==0.88.0", - "fastapi-events==0.8.0", + "fastapi~=0.103.2", + "fastapi-events~=0.9.1", "huggingface-hub~=0.16.4", - "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids - "matplotlib", # needed for plotting of Penner easing functions - "mediapipe", # needed for "mediapipeface" controlnet model + "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids + "matplotlib", # needed for plotting of Penner easing functions + "mediapipe", # needed for "mediapipeface" controlnet model "numpy", "npyscreen", "omegaconf", "onnx", "onnxruntime", "opencv-python", - "pydantic==1.*", + "pydantic~=2.4.2", + "pydantic-settings~=2.0.3", "picklescan", "pillow", "prompt-toolkit", @@ -95,33 +96,25 @@ dependencies = [ "mkdocs-git-revision-date-localized-plugin", "mkdocs-redirects==1.2.0", ] -"dev" = [ - "jurigged", - "pudb", -] +"dev" = ["jurigged", "pudb"] "test" = [ "black", "flake8", "Flake8-pyproject", "isort", + "mypy", "pre-commit", "pytest>6.0.0", "pytest-cov", "pytest-datadir", ] "xformers" = [ - "xformers~=0.0.19; sys_platform!='darwin'", - "triton; sys_platform=='linux'", -] -"onnx" = [ - "onnxruntime", -] -"onnx-cuda" = [ - "onnxruntime-gpu", -] -"onnx-directml" = [ - "onnxruntime-directml", + "xformers~=0.0.19; sys_platform!='darwin'", + "triton; sys_platform=='linux'", ] +"onnx" = ["onnxruntime"] +"onnx-cuda" = ["onnxruntime-gpu"] +"onnx-directml" = ["onnxruntime-directml"] [project.scripts] @@ -163,12 +156,15 @@ version = { attr = "invokeai.version.__version__" } [tool.setuptools.packages.find] "where" = ["."] "include" = [ - "invokeai.assets.fonts*","invokeai.version*", - "invokeai.generator*","invokeai.backend*", - "invokeai.frontend*", "invokeai.frontend.web.dist*", - 
"invokeai.frontend.web.static*", - "invokeai.configs*", - "invokeai.app*", + "invokeai.assets.fonts*", + "invokeai.version*", + "invokeai.generator*", + "invokeai.backend*", + "invokeai.frontend*", + "invokeai.frontend.web.dist*", + "invokeai.frontend.web.static*", + "invokeai.configs*", + "invokeai.app*", ] [tool.setuptools.package-data] @@ -182,7 +178,7 @@ version = { attr = "invokeai.version.__version__" } [tool.pytest.ini_options] addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\"" markers = [ - "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\"." + "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\".", ] [tool.coverage.run] branch = true @@ -190,7 +186,7 @@ source = ["invokeai"] omit = ["*tests*", "*migrations*", ".venv/*", "*.env"] [tool.coverage.report] show_missing = true -fail_under = 85 # let's set something sensible on Day 1 ... +fail_under = 85 # let's set something sensible on Day 1 ... 
[tool.coverage.json] output = "coverage/coverage.json" pretty_print = true @@ -209,7 +205,7 @@ exclude = [ "__pycache__", "build", "dist", - "invokeai/frontend/web/node_modules/" + "invokeai/frontend/web/node_modules/", ] [tool.black] @@ -218,3 +214,53 @@ line-length = 120 [tool.isort] profile = "black" line_length = 120 + +[tool.mypy] +ignore_missing_imports = true # ignores missing types in third-party libraries + +[[tool.mypy.overrides]] +follow_imports = "skip" +module = [ + "invokeai.app.api.routers.models", + "invokeai.app.invocations.compel", + "invokeai.app.invocations.latent", + "invokeai.app.services.config.config_base", + "invokeai.app.services.config.config_default", + "invokeai.app.services.invocation_stats.invocation_stats_default", + "invokeai.app.services.model_manager.model_manager_base", + "invokeai.app.services.model_manager.model_manager_default", + "invokeai.app.util.controlnet_utils", + "invokeai.backend.image_util.txt2mask", + "invokeai.backend.image_util.safety_checker", + "invokeai.backend.image_util.patchmatch", + "invokeai.backend.image_util.invisible_watermark", + "invokeai.backend.install.model_install_backend", + "invokeai.backend.ip_adapter.ip_adapter", + "invokeai.backend.ip_adapter.resampler", + "invokeai.backend.ip_adapter.unet_patcher", + "invokeai.backend.model_management.convert_ckpt_to_diffusers", + "invokeai.backend.model_management.lora", + "invokeai.backend.model_management.model_cache", + "invokeai.backend.model_management.model_manager", + "invokeai.backend.model_management.model_merge", + "invokeai.backend.model_management.model_probe", + "invokeai.backend.model_management.model_search", + "invokeai.backend.model_management.models.*", # this is needed to ignore the module's `__init__.py` + "invokeai.backend.model_management.models.base", + "invokeai.backend.model_management.models.controlnet", + "invokeai.backend.model_management.models.ip_adapter", + "invokeai.backend.model_management.models.lora", + 
"invokeai.backend.model_management.models.sdxl", + "invokeai.backend.model_management.models.stable_diffusion", + "invokeai.backend.model_management.models.vae", + "invokeai.backend.model_management.seamless", + "invokeai.backend.model_management.util", + "invokeai.backend.stable_diffusion.diffusers_pipeline", + "invokeai.backend.stable_diffusion.diffusion.cross_attention_control", + "invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion", + "invokeai.backend.util.hotfixes", + "invokeai.backend.util.logging", + "invokeai.backend.util.mps_fixes", + "invokeai.backend.util.util", + "invokeai.frontend.install.model_install", +] diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py index 822ffc1588..3c965895f9 100644 --- a/tests/nodes/test_node_graph.py +++ b/tests/nodes/test_node_graph.py @@ -1,4 +1,5 @@ import pytest +from pydantic import TypeAdapter from invokeai.app.invocations.baseinvocation import ( BaseInvocation, @@ -593,20 +594,21 @@ def test_graph_can_serialize(): g.add_edge(e) # Not throwing on this line is sufficient - _ = g.json() + _ = g.model_dump_json() def test_graph_can_deserialize(): g = Graph() n1 = TextToImageTestInvocation(id="1", prompt="Banana sushi") - n2 = ESRGANInvocation(id="2") + n2 = ImageToImageTestInvocation(id="2") g.add_node(n1) g.add_node(n2) e = create_edge(n1.id, "image", n2.id, "image") g.add_edge(e) - json = g.json() - g2 = Graph.parse_raw(json) + json = g.model_dump_json() + adapter_graph = TypeAdapter(Graph) + g2 = adapter_graph.validate_json(json) assert g2 is not None assert g2.nodes["1"] is not None @@ -619,7 +621,7 @@ def test_graph_can_deserialize(): def test_invocation_decorator(): - invocation_type = "test_invocation" + invocation_type = "test_invocation_decorator" title = "Test Invocation" tags = ["first", "second", "third"] category = "category" @@ -630,7 +632,7 @@ def test_invocation_decorator(): def invoke(self): pass - schema = TestInvocation.schema() + schema = 
TestInvocation.model_json_schema() assert schema.get("title") == title assert schema.get("tags") == tags @@ -640,18 +642,17 @@ def test_invocation_decorator(): def test_invocation_version_must_be_semver(): - invocation_type = "test_invocation" valid_version = "1.0.0" invalid_version = "not_semver" - @invocation(invocation_type, version=valid_version) + @invocation("test_invocation_version_valid", version=valid_version) class ValidVersionInvocation(BaseInvocation): def invoke(self): pass with pytest.raises(InvalidVersionError): - @invocation(invocation_type, version=invalid_version) + @invocation("test_invocation_version_invalid", version=invalid_version) class InvalidVersionInvocation(BaseInvocation): def invoke(self): pass @@ -694,4 +695,4 @@ def test_ints_do_not_accept_floats(): def test_graph_can_generate_schema(): # Not throwing on this line is sufficient # NOTE: if this test fails, it's PROBABLY because a new invocation type is breaking schema generation - _ = Graph.schema_json(indent=2) + _ = Graph.model_json_schema() diff --git a/tests/nodes/test_session_queue.py b/tests/nodes/test_session_queue.py index 6dd7c4845a..731316068c 100644 --- a/tests/nodes/test_session_queue.py +++ b/tests/nodes/test_session_queue.py @@ -1,5 +1,5 @@ import pytest -from pydantic import ValidationError, parse_raw_as +from pydantic import TypeAdapter, ValidationError from invokeai.app.services.session_queue.session_queue_common import ( Batch, @@ -150,8 +150,9 @@ def test_prepare_values_to_insert(batch_data_collection, batch_graph): values = prepare_values_to_insert(queue_id="default", batch=b, priority=0, max_new_queue_items=1000) assert len(values) == 8 + session_adapter = TypeAdapter(GraphExecutionState) # graph should be serialized - ges = parse_raw_as(GraphExecutionState, values[0].session) + ges = session_adapter.validate_json(values[0].session) # graph values should be populated assert ges.graph.get_node("1").prompt == "Banana sushi" @@ -160,15 +161,16 @@ def 
test_prepare_values_to_insert(batch_data_collection, batch_graph): assert ges.graph.get_node("4").prompt == "Nissan" # session ids should match deserialized graph - assert [v.session_id for v in values] == [parse_raw_as(GraphExecutionState, v.session).id for v in values] + assert [v.session_id for v in values] == [session_adapter.validate_json(v.session).id for v in values] # should unique session ids sids = [v.session_id for v in values] assert len(sids) == len(set(sids)) + nfv_list_adapter = TypeAdapter(list[NodeFieldValue]) # should have 3 node field values assert type(values[0].field_values) is str - assert len(parse_raw_as(list[NodeFieldValue], values[0].field_values)) == 3 + assert len(nfv_list_adapter.validate_json(values[0].field_values)) == 3 # should have batch id and priority assert all(v.batch_id == b.batch_id for v in values) diff --git a/tests/nodes/test_sqlite.py b/tests/nodes/test_sqlite.py index 6e4da8b36e..818f9d048f 100644 --- a/tests/nodes/test_sqlite.py +++ b/tests/nodes/test_sqlite.py @@ -15,7 +15,8 @@ class TestModel(BaseModel): @pytest.fixture def db() -> SqliteItemStorage[TestModel]: sqlite_db = SqliteDatabase(InvokeAIAppConfig(use_memory_db=True), InvokeAILogger.get_logger()) - return SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + sqlite_item_storage = SqliteItemStorage[TestModel](db=sqlite_db, table_name="test", id_field="id") + return sqlite_item_storage def test_sqlite_service_can_create_and_get(db: SqliteItemStorage[TestModel]): From 2c39557dc9e16470ae179c5dd5a541f1db9f09d3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:28:29 +1100 Subject: [PATCH 03/24] fix(nodes): fix metadata validation error --- invokeai/app/invocations/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 9578fc3ae9..4d76926aaa 100644 --- 
a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -48,7 +48,7 @@ class CoreMetadata(BaseModelExcludeNull): default=None, description="The generation mode that output this image", ) - created_by: Optional[str] = Field(description="The name of the creator of the image") + created_by: Optional[str] = Field(default=None, description="The name of the creator of the image") positive_prompt: Optional[str] = Field(default=None, description="The positive prompt parameter") negative_prompt: Optional[str] = Field(default=None, description="The negative prompt parameter") width: Optional[int] = Field(default=None, description="The width parameter") From 685cda89ff5d2448857d195d3f79a258bbe48c14 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:28:39 +1100 Subject: [PATCH 04/24] feat(api): restore get_session route --- invokeai/app/api/routers/sessions.py | 456 +++++++++++++-------------- invokeai/app/api_app.py | 4 +- 2 files changed, 226 insertions(+), 234 deletions(-) diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py index cd93a267ad..fb850d0b2b 100644 --- a/invokeai/app/api/routers/sessions.py +++ b/invokeai/app/api/routers/sessions.py @@ -1,57 +1,50 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from typing import Annotated, Optional, Union -from fastapi import Body, HTTPException, Path, Query, Response +from fastapi import HTTPException, Path from fastapi.routing import APIRouter -from pydantic.fields import Field -from invokeai.app.services.shared.pagination import PaginatedResults - -# Importing * is bad karma but needed here for node detection -from ...invocations import * # noqa: F401 F403 -from ...invocations.baseinvocation import BaseInvocation -from ...services.shared.graph import Edge, EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError +from ...services.shared.graph import 
GraphExecutionState from ..dependencies import ApiDependencies session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) -@session_router.post( - "/", - operation_id="create_session", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid json"}, - }, - deprecated=True, -) -async def create_session( - queue_id: str = Query(default="", description="The id of the queue to associate the session with"), - graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"), -) -> GraphExecutionState: - """Creates a new session, optionally initializing it with an invocation graph""" - session = ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph) - return session +# @session_router.post( +# "/", +# operation_id="create_session", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid json"}, +# }, +# deprecated=True, +# ) +# async def create_session( +# queue_id: str = Query(default="", description="The id of the queue to associate the session with"), +# graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"), +# ) -> GraphExecutionState: +# """Creates a new session, optionally initializing it with an invocation graph""" +# session = ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph) +# return session -@session_router.get( - "/", - operation_id="list_sessions", - responses={200: {"model": PaginatedResults[GraphExecutionState]}}, - deprecated=True, -) -async def list_sessions( - page: int = Query(default=0, description="The page of results to get"), - per_page: int = Query(default=10, description="The number of results per page"), - query: str = Query(default="", description="The query string to search for"), -) -> PaginatedResults[GraphExecutionState]: - """Gets a list of sessions, optionally searching""" - if query == "": - result = 
ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) - else: - result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) - return result +# @session_router.get( +# "/", +# operation_id="list_sessions", +# responses={200: {"model": PaginatedResults[GraphExecutionState]}}, +# deprecated=True, +# ) +# async def list_sessions( +# page: int = Query(default=0, description="The page of results to get"), +# per_page: int = Query(default=10, description="The number of results per page"), +# query: str = Query(default="", description="The query string to search for"), +# ) -> PaginatedResults[GraphExecutionState]: +# """Gets a list of sessions, optionally searching""" +# if query == "": +# result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) +# else: +# result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) +# return result @session_router.get( @@ -61,7 +54,6 @@ async def list_sessions( 200: {"model": GraphExecutionState}, 404: {"description": "Session not found"}, }, - deprecated=True, ) async def get_session( session_id: str = Path(description="The id of the session to get"), @@ -74,211 +66,211 @@ async def get_session( return session -@session_router.post( - "/{session_id}/nodes", - operation_id="add_node", - responses={ - 200: {"model": str}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def add_node( - session_id: str = Path(description="The id of the session"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore - description="The node to add" - ), -) -> str: - """Adds a node to the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.post( +# "/{session_id}/nodes", +# 
operation_id="add_node", +# responses={ +# 200: {"model": str}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def add_node( +# session_id: str = Path(description="The id of the session"), +# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore +# description="The node to add" +# ), +# ) -> str: +# """Adds a node to the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.add_node(node) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session.id - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.add_node(node) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session.id +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.put( - "/{session_id}/nodes/{node_path}", - operation_id="update_node", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def update_node( - session_id: str = Path(description="The id of the session"), - node_path: str = Path(description="The path to the node in the graph"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore - description="The new node" - ), -) -> GraphExecutionState: - """Updates a node in the graph and removes all linked edges""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.put( +# "/{session_id}/nodes/{node_path}", +# operation_id="update_node", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def update_node( +# session_id: str = Path(description="The id of the session"), +# node_path: str = Path(description="The path to the node in the graph"), +# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore +# description="The new node" +# ), +# ) -> GraphExecutionState: +# """Updates a node in the graph and removes all linked edges""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.update_node(node_path, node) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? 
- return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.update_node(node_path, node) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? +# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.delete( - "/{session_id}/nodes/{node_path}", - operation_id="delete_node", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def delete_node( - session_id: str = Path(description="The id of the session"), - node_path: str = Path(description="The path to the node to delete"), -) -> GraphExecutionState: - """Deletes a node in the graph and removes all linked edges""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.delete( +# "/{session_id}/nodes/{node_path}", +# operation_id="delete_node", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def delete_node( +# session_id: str = Path(description="The id of the session"), +# node_path: str = Path(description="The path to the node to delete"), +# ) -> GraphExecutionState: +# """Deletes a node in the graph and removes all linked edges""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.delete_node(node_path) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done 
automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.delete_node(node_path) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? +# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.post( - "/{session_id}/edges", - operation_id="add_edge", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def add_edge( - session_id: str = Path(description="The id of the session"), - edge: Edge = Body(description="The edge to add"), -) -> GraphExecutionState: - """Adds an edge to the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.post( +# "/{session_id}/edges", +# operation_id="add_edge", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def add_edge( +# session_id: str = Path(description="The id of the session"), +# edge: Edge = Body(description="The edge to add"), +# ) -> GraphExecutionState: +# """Adds an edge to the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - session.add_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? 
- return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# session.add_edge(edge) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? +# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -# TODO: the edge being in the path here is really ugly, find a better solution -@session_router.delete( - "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", - operation_id="delete_edge", - responses={ - 200: {"model": GraphExecutionState}, - 400: {"description": "Invalid node or link"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def delete_edge( - session_id: str = Path(description="The id of the session"), - from_node_id: str = Path(description="The id of the node the edge is coming from"), - from_field: str = Path(description="The field of the node the edge is coming from"), - to_node_id: str = Path(description="The id of the node the edge is going to"), - to_field: str = Path(description="The field of the node the edge is going to"), -) -> GraphExecutionState: - """Deletes an edge from the graph""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# # TODO: the edge being in the path here is really ugly, find a better solution +# @session_router.delete( +# "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", +# operation_id="delete_edge", +# responses={ +# 200: {"model": GraphExecutionState}, +# 400: {"description": "Invalid node or link"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def delete_edge( +# session_id: str = Path(description="The id of the session"), 
+# from_node_id: str = Path(description="The id of the node the edge is coming from"), +# from_field: str = Path(description="The field of the node the edge is coming from"), +# to_node_id: str = Path(description="The id of the node the edge is going to"), +# to_field: str = Path(description="The field of the node the edge is going to"), +# ) -> GraphExecutionState: +# """Deletes an edge from the graph""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - try: - edge = Edge( - source=EdgeConnection(node_id=from_node_id, field=from_field), - destination=EdgeConnection(node_id=to_node_id, field=to_field), - ) - session.delete_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set( - session - ) # TODO: can this be done automatically, or add node through an API? - return session - except NodeAlreadyExecutedError: - raise HTTPException(status_code=400) - except IndexError: - raise HTTPException(status_code=400) +# try: +# edge = Edge( +# source=EdgeConnection(node_id=from_node_id, field=from_field), +# destination=EdgeConnection(node_id=to_node_id, field=to_field), +# ) +# session.delete_edge(edge) +# ApiDependencies.invoker.services.graph_execution_manager.set( +# session +# ) # TODO: can this be done automatically, or add node through an API? 
+# return session +# except NodeAlreadyExecutedError: +# raise HTTPException(status_code=400) +# except IndexError: +# raise HTTPException(status_code=400) -@session_router.put( - "/{session_id}/invoke", - operation_id="invoke_session", - responses={ - 200: {"model": None}, - 202: {"description": "The invocation is queued"}, - 400: {"description": "The session has no invocations ready to invoke"}, - 404: {"description": "Session not found"}, - }, - deprecated=True, -) -async def invoke_session( - queue_id: str = Query(description="The id of the queue to associate the session with"), - session_id: str = Path(description="The id of the session to invoke"), - all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"), -) -> Response: - """Invokes a session""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) +# @session_router.put( +# "/{session_id}/invoke", +# operation_id="invoke_session", +# responses={ +# 200: {"model": None}, +# 202: {"description": "The invocation is queued"}, +# 400: {"description": "The session has no invocations ready to invoke"}, +# 404: {"description": "Session not found"}, +# }, +# deprecated=True, +# ) +# async def invoke_session( +# queue_id: str = Query(description="The id of the queue to associate the session with"), +# session_id: str = Path(description="The id of the session to invoke"), +# all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"), +# ) -> Response: +# """Invokes a session""" +# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) +# if session is None: +# raise HTTPException(status_code=404) - if session.is_complete(): - raise HTTPException(status_code=400) +# if session.is_complete(): +# raise HTTPException(status_code=400) - ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all) - return 
Response(status_code=202) +# ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all) +# return Response(status_code=202) -@session_router.delete( - "/{session_id}/invoke", - operation_id="cancel_session_invoke", - responses={202: {"description": "The invocation is canceled"}}, - deprecated=True, -) -async def cancel_session_invoke( - session_id: str = Path(description="The id of the session to cancel"), -) -> Response: - """Invokes a session""" - ApiDependencies.invoker.cancel(session_id) - return Response(status_code=202) +# @session_router.delete( +# "/{session_id}/invoke", +# operation_id="cancel_session_invoke", +# responses={202: {"description": "The invocation is canceled"}}, +# deprecated=True, +# ) +# async def cancel_session_invoke( +# session_id: str = Path(description="The id of the session to cancel"), +# ) -> Response: +# """Invokes a session""" +# ApiDependencies.invoker.cancel(session_id) +# return Response(status_code=202) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 5bbd8150c1..fa68d1b3e7 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, session_queue, utilities + from .api.routers import app_info, board_images, boards, images, models, sessions, session_queue, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField @@ -85,7 +85,7 @@ async def shutdown_event(): # Include all routers -# app.include_router(sessions.session_router, prefix="/api") +app.include_router(sessions.session_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api") From 9d9592230a402fe9e632b52d07c510406e62bc86 Mon Sep 17 00:00:00 2001 From: 
psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:29:47 +1100 Subject: [PATCH 05/24] chore: lint --- invokeai/app/api_app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index fa68d1b3e7..e07b037dd1 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -31,7 +31,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c from ..backend.util.logging import InvokeAILogger from .api.dependencies import ApiDependencies - from .api.routers import app_info, board_images, boards, images, models, sessions, session_queue, utilities + from .api.routers import app_info, board_images, boards, images, models, session_queue, sessions, utilities from .api.sockets import SocketIO from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField From a094f4ca2b4e28445bf52388f60b296097ea693a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:37:53 +1100 Subject: [PATCH 06/24] fix: pin `python-socketio~=5.10.0` --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4c8ec0f5e8..03fc45c5dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ "accelerate~=0.23.0", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel~=2.0.2", "controlnet-aux>=0.0.6", - "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 + "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", # 
When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 @@ -70,7 +70,7 @@ dependencies = [ 'pyperclip', "pyreadline3", "python-multipart", - "python-socketio", + "python-socketio~=5.10.0", "pytorch-lightning", "realesrgan", "requests~=2.28.2", From c69715636dcd6bbb425d01a1a63d3a84b40bb212 Mon Sep 17 00:00:00 2001 From: Surisen Date: Tue, 17 Oct 2023 06:15:20 +0200 Subject: [PATCH 07/24] translationBot(ui): update translation (Chinese (Simplified)) Currently translated at 100.0% (1217 of 1217 strings) Co-authored-by: Surisen Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/zh_Hans/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/zh_CN.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 23940542a9..3f896076d4 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -1101,16 +1101,16 @@ "contentShuffle": "Content Shuffle", "f": "F", "h": "H", - "controlnet": "$t(controlnet.controlAdapter) #{{number}} ($t(common.controlNet))", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", "control": "Control (普通控制)", "coarse": "Coarse", "depthMidas": "Depth (Midas)", "w": "W", - "ip_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.ipAdapter))", + "ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))", "mediapipeFace": "Mediapipe Face", "mlsd": "M-LSD", "lineart": "Lineart", - "t2i_adapter": "$t(controlnet.controlAdapter) #{{number}} ($t(common.t2iAdapter))", + "t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))", "megaControl": "Mega Control (超级控制)", "depthZoe": "Depth (Zoe)", "colorMap": "Color", From 
9542883bb5b88e97a66dfe4da60d366238dcfb41 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 28 Sep 2023 09:28:41 -0400 Subject: [PATCH 08/24] update requirements to python 3.10-11 --- docs/installation/010_INSTALL_AUTOMATED.md | 4 ++-- docs/installation/020_INSTALL_MANUAL.md | 4 ++-- docs/installation/060_INSTALL_PATCHMATCH.md | 3 +-- installer/install.bat.in | 13 ++++++------- installer/install.sh.in | 4 ++-- installer/lib/installer.py | 10 +--------- installer/readme.txt | 8 ++++---- pyproject.toml | 2 +- 8 files changed, 19 insertions(+), 29 deletions(-) diff --git a/docs/installation/010_INSTALL_AUTOMATED.md b/docs/installation/010_INSTALL_AUTOMATED.md index 0937c07bca..52192f33c0 100644 --- a/docs/installation/010_INSTALL_AUTOMATED.md +++ b/docs/installation/010_INSTALL_AUTOMATED.md @@ -40,7 +40,7 @@ experimental versions later. this, open up a command-line window ("Terminal" on Linux and Macintosh, "Command" or "Powershell" on Windows) and type `python --version`. If Python is installed, it will print out the version - number. If it is version `3.9.*`, `3.10.*` or `3.11.*` you meet + number. If it is version `3.10.*` or `3.11.*` you meet requirements. !!! warning "What to do if you have an unsupported version" @@ -48,7 +48,7 @@ experimental versions later. Go to [Python Downloads](https://www.python.org/downloads/) and download the appropriate installer package for your platform. We recommend [Version - 3.10.9](https://www.python.org/downloads/release/python-3109/), + 3.10.12](https://www.python.org/downloads/release/python-3109/), which has been extensively tested with InvokeAI. 
_Please select your platform in the section below for platform-specific diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md index a19992d266..27484c0ffd 100644 --- a/docs/installation/020_INSTALL_MANUAL.md +++ b/docs/installation/020_INSTALL_MANUAL.md @@ -32,7 +32,7 @@ gaming): * **Python** - version 3.9 through 3.11 + version 3.10 through 3.11 * **CUDA Tools** @@ -65,7 +65,7 @@ gaming): To install InvokeAI with virtual environments and the PIP package manager, please follow these steps: -1. Please make sure you are using Python 3.9 through 3.11. The rest of the install +1. Please make sure you are using Python 3.10 through 3.11. The rest of the install procedure depends on this and will not work with other versions: ```bash diff --git a/docs/installation/060_INSTALL_PATCHMATCH.md b/docs/installation/060_INSTALL_PATCHMATCH.md index ccfd19d207..a9646f8b60 100644 --- a/docs/installation/060_INSTALL_PATCHMATCH.md +++ b/docs/installation/060_INSTALL_PATCHMATCH.md @@ -59,8 +59,7 @@ Prior to installing PyPatchMatch, you need to take the following steps: `from patchmatch import patch_match`: It should look like the following: ```py - Python 3.9.5 (default, Nov 23 2021, 15:27:38) - [GCC 9.3.0] on linux + Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> from patchmatch import patch_match Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch". diff --git a/installer/install.bat.in b/installer/install.bat.in index ffe96d4355..5fa76471de 100644 --- a/installer/install.bat.in +++ b/installer/install.bat.in @@ -1,7 +1,7 @@ @echo off setlocal EnableExtensions EnableDelayedExpansion -@rem This script requires the user to install Python 3.9 or higher. All other +@rem This script requires the user to install Python 3.10 or higher. All other @rem requirements are downloaded as needed. 
@rem change to the script's directory @@ -19,7 +19,7 @@ set INVOKEAI_VERSION=latest set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/ set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting set PYTHON_URL=https://www.python.org/downloads/windows/ -set MINIMUM_PYTHON_VERSION=3.9.0 +set MINIMUM_PYTHON_VERSION=3.10.0 set PYTHON_URL=https://www.python.org/downloads/release/python-3109/ set err_msg=An error has occurred and the script could not continue. @@ -28,8 +28,7 @@ set err_msg=An error has occurred and the script could not continue. echo This script will install InvokeAI and its dependencies. echo. echo BEFORE YOU START PLEASE MAKE SURE TO DO THE FOLLOWING -echo 1. Install python 3.9 or 3.10. Python version 3.11 and above are -echo not supported at the moment. +echo 1. Install python 3.10 or 3.11. Python version 3.9 is no longer supported. echo 2. Double-click on the file WinLongPathsEnabled.reg in order to echo enable long path support on your system. echo 3. Install the Visual C++ core libraries. @@ -46,19 +45,19 @@ echo ***** Checking and Updating Python ***** call python --version >.tmp1 2>.tmp2 if %errorlevel% == 1 ( - set err_msg=Please install Python 3.10. See %INSTRUCTIONS% for details. + set err_msg=Please install Python 3.10-11. See %INSTRUCTIONS% for details. goto err_exit ) for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i if "%python_version%" == "" ( - set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL% + set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.12 from %PYTHON_URL% goto err_exit ) call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version% if %errorlevel% == 1 ( - set err_msg=Your version of Python is too low. 
You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL% + set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.12 from %PYTHON_URL% goto err_exit ) diff --git a/installer/install.sh.in b/installer/install.sh.in index 1b8ba92ea6..9cf41192bf 100755 --- a/installer/install.sh.in +++ b/installer/install.sh.in @@ -8,10 +8,10 @@ cd $scriptdir function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } -MINIMUM_PYTHON_VERSION=3.9.0 +MINIMUM_PYTHON_VERSION=3.10.0 MAXIMUM_PYTHON_VERSION=3.11.100 PYTHON="" -for candidate in python3.11 python3.10 python3.9 python3 python ; do +for candidate in python3.11 python3.10 python3 python ; do if ppath=`which $candidate`; then # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational # we check that this found executable can actually run diff --git a/installer/lib/installer.py b/installer/lib/installer.py index 70ed4d4331..bf48e3b06d 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -13,7 +13,7 @@ from pathlib import Path from tempfile import TemporaryDirectory from typing import Union -SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100" +SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100" INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"] BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp" @@ -67,7 +67,6 @@ class Installer: # Cleaning up temporary directories on Windows results in a race condition # and a stack trace. 
# `ignore_cleanup_errors` was only added in Python 3.10 - # users of Python 3.9 will see a gnarly stack trace on installer exit if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10: venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True) else: @@ -139,13 +138,6 @@ class Installer: except shutil.SameFileError: venv.create(venv_dir, with_pip=True, symlinks=True) - # upgrade pip in Python 3.9 environments - if int(platform.python_version_tuple()[1]) == 9: - from plumbum import FG, local - - pip = local[get_pip_from_venv(venv_dir)] - pip["install", "--upgrade", "pip"] & FG - return venv_dir def install( diff --git a/installer/readme.txt b/installer/readme.txt index b9a97e2093..ef040c3913 100644 --- a/installer/readme.txt +++ b/installer/readme.txt @@ -4,7 +4,7 @@ Project homepage: https://github.com/invoke-ai/InvokeAI Preparations: - You will need to install Python 3.9 or higher for this installer + You will need to install Python 3.10 or higher for this installer to work. Instructions are given here: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/ @@ -14,15 +14,15 @@ Preparations: python --version If all is well, it will print "Python 3.X.X", where the version number - is at least 3.9.*, and not higher than 3.11.*. + is at least 3.10.*, and not higher than 3.11.*. If this works, check the version of the Python package manager, pip: pip --version You should get a message that indicates that the pip package - installer was derived from Python 3.9 or 3.10. For example: - "pip 22.3.1 from /usr/bin/pip (python 3.9)" + installer was derived from Python 3.10 or 3.11. 
For example: + "pip 22.0.1 from /usr/bin/pip (python 3.10)" Long Paths on Windows: diff --git a/pyproject.toml b/pyproject.toml index 03fc45c5dc..67486e1120 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "InvokeAI" description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process" -requires-python = ">=3.9, <3.12" +requires-python = ">=3.10, <3.12" readme = { content-type = "text/markdown", file = "README.md" } keywords = ["stable-diffusion", "AI"] dynamic = ["version"] From f11ba81a8d5e200c2985658ad05d1b6f0ad4f593 Mon Sep 17 00:00:00 2001 From: user1 Date: Tue, 10 Oct 2023 11:29:19 -0700 Subject: [PATCH 09/24] Fixing some var and arg names. --- .../stable_diffusion/diffusers_pipeline.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 0943b78bf8..5681a04695 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,11 +546,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - if control_data is not None and t2i_adapter_data is not None: + down_intrablock_additional_residuals = None + # if control_data is not None and t2i_adapter_data is not None: # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. 
- raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - elif control_data is not None: + # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + # elif control_data is not None: + if control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -559,7 +561,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - elif t2i_adapter_data is not None: + # elif t2i_adapter_data is not None: + if t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. @@ -584,7 +587,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - down_block_additional_residuals = accum_adapter_state + # down_block_additional_residuals = accum_adapter_state + down_intrablock_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -593,8 +597,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, - mid_block_additional_residual=mid_block_additional_residual, + down_block_additional_residuals=down_block_additional_residuals, # for ControlNet + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From 378689a519eaebb0181c38d0ad8faf6e747ceb16 Mon Sep 17 
00:00:00 2001 From: user1 Date: Tue, 10 Oct 2023 12:25:54 -0700 Subject: [PATCH 10/24] Changes to _apply_standard_conditioning_sequentially() and _apply_cross_attention_controlled_conditioning() to reflect changes to T2I-Adapter implementation to allow usage of T2I-Adapter and ControlNet at the same time. Also, the PREVIOUS commit (@8d3885d, which was already pushed to github repo) was wrongly commented, but too late to fix without a force push or other mucking that I'm reluctant to do. That commit is actually the one that has all the changes to diffusers_pipeline.py to use additional arg down_intrablock_additional_residuals (introduced in diffusers PR https://github.com/huggingface/diffusers/pull/5362) to detangle T2I-Adapter from ControlNet inputs to main UNet. --- .../diffusion/shared_invokeai_diffusion.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index c12c86ed92..ef0f3ee261 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,7 +260,6 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) - else: ( unconditioned_next_x, @@ -407,6 +406,16 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + print("down_intrablock shape: ", down_intrablock.shape) + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + 
cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -437,6 +446,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -465,6 +475,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -489,6 +500,15 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -517,6 +537,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -536,6 +557,7 @@ class InvokeAIDiffuserComponent: 
{"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) From 06f8a3276d96c4d4f4f316d5714fcc885a77c5d2 Mon Sep 17 00:00:00 2001 From: user1 Date: Mon, 16 Oct 2023 10:15:12 -0700 Subject: [PATCH 11/24] Cleaning up (removing diagnostic prints) --- .../stable_diffusion/diffusion/shared_invokeai_diffusion.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index ef0f3ee261..d2af522496 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -411,7 +411,6 @@ class InvokeAIDiffuserComponent: if down_intrablock_additional_residuals is not None: uncond_down_intrablock, cond_down_intrablock = [], [] for down_intrablock in down_intrablock_additional_residuals: - print("down_intrablock shape: ", down_intrablock.shape) _uncond_down, _cond_down = down_intrablock.chunk(2) uncond_down_intrablock.append(_uncond_down) cond_down_intrablock.append(_cond_down) From fff29d663db391307db82a84c1a7af644d5b6d45 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:54:15 +1100 Subject: [PATCH 12/24] chore: lint --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 5681a04695..1b65326f6e 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -548,8 +548,8 @@ class 
StableDiffusionGeneratorPipeline(StableDiffusionPipeline): mid_block_additional_residual = None down_intrablock_additional_residuals = None # if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. + # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") # elif control_data is not None: if control_data is not None: @@ -598,8 +598,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data=conditioning_data, # extra: down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From b14699355317fdaf1eaff03b36a5ce85fedb2943 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:58:02 +1100 Subject: [PATCH 13/24] feat(ui): remove special handling for t2i vs controlnet --- .../middleware/listenerMiddleware/index.ts | 7 +- .../listeners/controlAdapterAddedOrEnabled.ts | 87 ------------------- .../store/controlAdaptersSlice.ts | 67 -------------- .../frontend/web/src/services/api/schema.d.ts | 71 +++++++++++---- 4 files changed, 55 insertions(+), 177 deletions(-) delete mode 100644 
invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index cbc88966a7..772ea216c0 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,6 +12,7 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -71,8 +72,6 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; -import { addControlAdapterAddedOrEnabledListener } from './listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -200,7 +199,3 @@ addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); - -// Display toast when controlnet or t2i adapter enabled -// TODO: Remove when they can both be enabled at same time -addControlAdapterAddedOrEnabledListener(); diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts deleted file mode 100644 index bc5387c1fb..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { isAnyOf } from '@reduxjs/toolkit'; -import { - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterIsEnabledChanged, - controlAdapterRecalled, - selectControlAdapterAll, - selectControlAdapterById, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { ControlAdapterType } from 'features/controlAdapters/store/types'; -import { addToast } from 'features/system/store/systemSlice'; -import i18n from 'i18n'; -import { startAppListening } from '..'; - -const isAnyControlAdapterAddedOrEnabled = isAnyOf( - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterRecalled, - controlAdapterIsEnabledChanged -); - -/** - * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive - * This displays a toast when one is enabled and the other is already enabled, or one is added - * with the other enabled - */ -export const addControlAdapterAddedOrEnabledListener = () => { - startAppListening({ - matcher: isAnyControlAdapterAddedOrEnabled, - effect: async (action, { dispatch, getOriginalState }) => { - const controlAdapters = getOriginalState().controlAdapters; - - const hasEnabledControlNets = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); - - const hasEnabledT2IAdapters = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); - - let caType: ControlAdapterType | null = null; - - if (controlAdapterAdded.match(action)) { - caType = action.payload.type; - } - - if 
(controlAdapterAddedFromImage.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterRecalled.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterIsEnabledChanged.match(action)) { - const _caType = selectControlAdapterById( - controlAdapters, - action.payload.id - )?.type; - if (!_caType) { - return; - } - caType = _caType; - } - - if ( - (caType === 'controlnet' && hasEnabledT2IAdapters) || - (caType === 't2i_adapter' && hasEnabledControlNets) - ) { - const title = - caType === 'controlnet' - ? i18n.t('controlnet.controlNetEnabledT2IDisabled') - : i18n.t('controlnet.t2iEnabledControlNetDisabled'); - - const description = i18n.t('controlnet.controlNetT2IMutexDesc'); - - dispatch( - addToast({ - title, - description, - status: 'warning', - }) - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index a3645fad9d..9e293f1104 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,61 +88,6 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); -// TODO: I think we can safely remove this? 
-// const disableAllIPAdapters = ( -// state: ControlAdaptersState, -// exclude?: string -// ) => { -// const updates: Update[] = selectAllIPAdapters(state) -// .filter((ca) => ca.id !== exclude) -// .map((ca) => ({ -// id: ca.id, -// changes: { isEnabled: false }, -// })); -// caAdapter.updateMany(state, updates); -// }; - -const disableAllControlNets = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllControlNets(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableAllT2IAdapters = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllT2IAdapters(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableIncompatibleControlAdapters = ( - state: ControlAdaptersState, - type: ControlAdapterType, - exclude?: string -) => { - if (type === 'controlnet') { - // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is - disableAllT2IAdapters(state, exclude); - } - if (type === 't2i_adapter') { - // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets - disableAllControlNets(state, exclude); - } -}; - export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -158,7 +103,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); - disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -175,8 +119,6 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); - const { type, id } = action.payload; - 
disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -196,8 +138,6 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); - const { type } = newControlAdapter; - disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -217,7 +157,6 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); - disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -235,12 +174,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); - if (isEnabled) { - // we are enabling a control adapter. due to limitations in the current system, we may need to disable other adapters - // TODO: disable when multiple IP adapters are supported - const ca = selectControlAdapterById(state, id); - ca && disableIncompatibleControlAdapters(state, ca.type, id); - } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d4678dc03b..e0da45c4c9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,6 +5,13 @@ export type paths = { + "/api/v1/sessions/{session_id}": { + /** + * Get Session + * @description Gets a session + */ + get: operations["get_session"]; + }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -1897,7 +1904,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by: string | null; + created_by?: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -3035,7 +3042,7 @@ export type components = { * @description The nodes in 
this graph */ nodes?: { - [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | 
components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | 
components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; + [key: 
string]: components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | 
components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | 
components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"]; }; /** * Edges @@ -3072,7 +3079,7 
@@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; + [key: string]: components["schemas"]["FaceOffOutput"] | 
components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageCollectionOutput"]; }; /** * Errors @@ -9139,11 +9146,11 @@ export type components = { ui_order: number | null; }; /** - * StableDiffusionOnnxModelFormat + * IPAdapterModelFormat * @description An enumeration. 
* @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + IPAdapterModelFormat: "invokeai"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9156,36 +9163,36 @@ export type components = { * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9200,6 +9207,36 @@ export type external = Record; export type operations = { + /** + * Get Session + * @description Gets a session + */ + get_session: { + parameters: { + path: { + /** @description The id of the session to get */ + session_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["GraphExecutionState"]; + }; + }; + /** @description Session not found */ + 404: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; /** * Parse Dynamicprompts * @description Creates a batch process From bdf4c4944cf0e9e827ee026fddfd1bd74f3c40e0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 14/24] Revert "feat(ui): remove special handling for t2i vs controlnet" This reverts commit b14699355317fdaf1eaff03b36a5ce85fedb2943. 
--- .../middleware/listenerMiddleware/index.ts | 7 +- .../listeners/controlAdapterAddedOrEnabled.ts | 87 +++++++++++++++++++ .../store/controlAdaptersSlice.ts | 67 ++++++++++++++ .../frontend/web/src/services/api/schema.d.ts | 71 ++++----------- 4 files changed, 177 insertions(+), 55 deletions(-) create mode 100644 invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index 772ea216c0..cbc88966a7 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,7 +12,6 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -72,6 +71,8 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; +import { addControlAdapterAddedOrEnabledListener } from './listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -199,3 +200,7 @@ 
addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); + +// Display toast when controlnet or t2i adapter enabled +// TODO: Remove when they can both be enabled at same time +addControlAdapterAddedOrEnabledListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts new file mode 100644 index 0000000000..bc5387c1fb --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts @@ -0,0 +1,87 @@ +import { isAnyOf } from '@reduxjs/toolkit'; +import { + controlAdapterAdded, + controlAdapterAddedFromImage, + controlAdapterIsEnabledChanged, + controlAdapterRecalled, + selectControlAdapterAll, + selectControlAdapterById, +} from 'features/controlAdapters/store/controlAdaptersSlice'; +import { ControlAdapterType } from 'features/controlAdapters/store/types'; +import { addToast } from 'features/system/store/systemSlice'; +import i18n from 'i18n'; +import { startAppListening } from '..'; + +const isAnyControlAdapterAddedOrEnabled = isAnyOf( + controlAdapterAdded, + controlAdapterAddedFromImage, + controlAdapterRecalled, + controlAdapterIsEnabledChanged +); + +/** + * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive + * This displays a toast when one is enabled and the other is already enabled, or one is added + * with the other enabled + */ +export const addControlAdapterAddedOrEnabledListener = () => { + startAppListening({ + matcher: isAnyControlAdapterAddedOrEnabled, + effect: async (action, { dispatch, getOriginalState }) => { + const controlAdapters = getOriginalState().controlAdapters; + + const hasEnabledControlNets = selectControlAdapterAll( + controlAdapters + ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); + + const hasEnabledT2IAdapters = 
selectControlAdapterAll( + controlAdapters + ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); + + let caType: ControlAdapterType | null = null; + + if (controlAdapterAdded.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterAddedFromImage.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterRecalled.match(action)) { + caType = action.payload.type; + } + + if (controlAdapterIsEnabledChanged.match(action)) { + const _caType = selectControlAdapterById( + controlAdapters, + action.payload.id + )?.type; + if (!_caType) { + return; + } + caType = _caType; + } + + if ( + (caType === 'controlnet' && hasEnabledT2IAdapters) || + (caType === 't2i_adapter' && hasEnabledControlNets) + ) { + const title = + caType === 'controlnet' + ? i18n.t('controlnet.controlNetEnabledT2IDisabled') + : i18n.t('controlnet.t2iEnabledControlNetDisabled'); + + const description = i18n.t('controlnet.controlNetT2IMutexDesc'); + + dispatch( + addToast({ + title, + description, + status: 'warning', + }) + ); + } + }, + }); +}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index 9e293f1104..a3645fad9d 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,6 +88,61 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); +// TODO: I think we can safely remove this? 
+// const disableAllIPAdapters = ( +// state: ControlAdaptersState, +// exclude?: string +// ) => { +// const updates: Update[] = selectAllIPAdapters(state) +// .filter((ca) => ca.id !== exclude) +// .map((ca) => ({ +// id: ca.id, +// changes: { isEnabled: false }, +// })); +// caAdapter.updateMany(state, updates); +// }; + +const disableAllControlNets = ( + state: ControlAdaptersState, + exclude?: string +) => { + const updates: Update[] = selectAllControlNets(state) + .filter((ca) => ca.id !== exclude) + .map((ca) => ({ + id: ca.id, + changes: { isEnabled: false }, + })); + caAdapter.updateMany(state, updates); +}; + +const disableAllT2IAdapters = ( + state: ControlAdaptersState, + exclude?: string +) => { + const updates: Update[] = selectAllT2IAdapters(state) + .filter((ca) => ca.id !== exclude) + .map((ca) => ({ + id: ca.id, + changes: { isEnabled: false }, + })); + caAdapter.updateMany(state, updates); +}; + +const disableIncompatibleControlAdapters = ( + state: ControlAdaptersState, + type: ControlAdapterType, + exclude?: string +) => { + if (type === 'controlnet') { + // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is + disableAllT2IAdapters(state, exclude); + } + if (type === 't2i_adapter') { + // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets + disableAllControlNets(state, exclude); + } +}; + export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -103,6 +158,7 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); + disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -119,6 +175,8 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); + const { type, id } = action.payload; + 
disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -138,6 +196,8 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); + const { type } = newControlAdapter; + disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -157,6 +217,7 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); + disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -174,6 +235,12 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); + if (isEnabled) { + // we are enabling a control adapter. due to limitations in the current system, we may need to disable other adapters + // TODO: disable when multiple IP adapters are supported + const ca = selectControlAdapterById(state, id); + ca && disableIncompatibleControlAdapters(state, ca.type, id); + } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index e0da45c4c9..d4678dc03b 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,13 +5,6 @@ export type paths = { - "/api/v1/sessions/{session_id}": { - /** - * Get Session - * @description Gets a session - */ - get: operations["get_session"]; - }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -1904,7 +1897,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by?: string | null; + created_by: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -3042,7 +3035,7 @@ export type components = { * @description The nodes in 
this graph */ nodes?: { - [key: string]: components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | 
components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["T2IAdapterInvocation"] | 
components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ColorInvocation"] | 
components["schemas"]["ImageNSFWBlurInvocation"]; + [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["T2IAdapterInvocation"] | 
components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["RoundInvocation"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"]; }; /** * Edges @@ -3079,7 +3072,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["FaceOffOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["String2Output"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageCollectionOutput"]; + [key: 
string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SchedulerOutput"]; }; /** * Errors @@ -9146,11 +9139,11 @@ export type components = { ui_order: number | null; }; /** - * IPAdapterModelFormat + * StableDiffusionOnnxModelFormat * @description An enumeration. 
* @enum {string} */ - IPAdapterModelFormat: "invokeai"; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -9163,36 +9156,36 @@ export type components = { * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * IPAdapterModelFormat + * @description An enumeration. + * @enum {string} + */ + IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. 
- * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9207,36 +9200,6 @@ export type external = Record; export type operations = { - /** - * Get Session - * @description Gets a session - */ - get_session: { - parameters: { - path: { - /** @description The id of the session to get */ - session_id: string; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["GraphExecutionState"]; - }; - }; - /** @description Session not found */ - 404: { - content: never; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Parse Dynamicprompts * @description Creates a batch process From 38e7eb8878aa213426764b5aaa27532ef2b9db8a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 15/24] Revert "chore: lint" This reverts commit fff29d663db391307db82a84c1a7af644d5b6d45. --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 1b65326f6e..5681a04695 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -548,8 +548,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): mid_block_additional_residual = None down_intrablock_additional_residuals = None # if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. 
+ # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") # elif control_data is not None: if control_data is not None: @@ -598,8 +598,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data=conditioning_data, # extra: down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale From 6e697b7b6f0a7d7ae24455e15c15baa1bc0adaf6 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 16/24] Revert "Cleaning up (removing diagnostic prints)" This reverts commit 06f8a3276d96c4d4f4f316d5714fcc885a77c5d2. 
--- .../stable_diffusion/diffusion/shared_invokeai_diffusion.py | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index d2af522496..ef0f3ee261 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -411,6 +411,7 @@ class InvokeAIDiffuserComponent: if down_intrablock_additional_residuals is not None: uncond_down_intrablock, cond_down_intrablock = [], [] for down_intrablock in down_intrablock_additional_residuals: + print("down_intrablock shape: ", down_intrablock.shape) _uncond_down, _cond_down = down_intrablock.chunk(2) uncond_down_intrablock.append(_uncond_down) cond_down_intrablock.append(_cond_down) From c04fb451ee4603cad7751f890a0b3047c15df5b3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 17/24] Revert "Changes to _apply_standard_conditioning_sequentially() and _apply_cross_attention_controlled_conditioning() to reflect changes to T2I-Adapter implementation to allow usage of T2I-Adapter and ControlNet at the same time." This reverts commit 378689a519eaebb0181c38d0ad8faf6e747ceb16. 
--- .../diffusion/shared_invokeai_diffusion.py | 24 +------------------ 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index ef0f3ee261..c12c86ed92 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,6 +260,7 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) + else: ( unconditioned_next_x, @@ -406,16 +407,6 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) - uncond_down_intrablock, cond_down_intrablock = None, None - down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) - if down_intrablock_additional_residuals is not None: - uncond_down_intrablock, cond_down_intrablock = [], [] - for down_intrablock in down_intrablock_additional_residuals: - print("down_intrablock shape: ", down_intrablock.shape) - _uncond_down, _cond_down = down_intrablock.chunk(2) - uncond_down_intrablock.append(_uncond_down) - cond_down_intrablock.append(_cond_down) - uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -446,7 +437,6 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, - down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -475,7 +465,6 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, - down_intrablock_additional_residuals=cond_down_intrablock, 
added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -500,15 +489,6 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) - uncond_down_intrablock, cond_down_intrablock = None, None - down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) - if down_intrablock_additional_residuals is not None: - uncond_down_intrablock, cond_down_intrablock = [], [] - for down_intrablock in down_intrablock_additional_residuals: - _uncond_down, _cond_down = down_intrablock.chunk(2) - uncond_down_intrablock.append(_uncond_down) - cond_down_intrablock.append(_cond_down) - uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -537,7 +517,6 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, - down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -557,7 +536,6 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, - down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) From 58a0709c1ea19293e49f6c88d8836f0378a982cd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 19:56:52 +1100 Subject: [PATCH 18/24] Revert "Fixing some var and arg names." This reverts commit f11ba81a8d5e200c2985658ad05d1b6f0ad4f593. 
--- .../stable_diffusion/diffusers_pipeline.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 5681a04695..0943b78bf8 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,13 +546,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - down_intrablock_additional_residuals = None - # if control_data is not None and t2i_adapter_data is not None: + if control_data is not None and t2i_adapter_data is not None: # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. - # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - # elif control_data is not None: - if control_data is not None: + raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + elif control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -561,8 +559,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - # elif t2i_adapter_data is not None: - if t2i_adapter_data is not None: + elif t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. 
@@ -587,8 +584,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - # down_block_additional_residuals = accum_adapter_state - down_intrablock_additional_residuals = accum_adapter_state + down_block_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -597,9 +593,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, # for ControlNet - mid_block_additional_residual=mid_block_additional_residual, # for ControlNet - down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter + down_block_additional_residuals=down_block_additional_residuals, + mid_block_additional_residual=mid_block_additional_residual, ) guidance_scale = conditioning_data.guidance_scale From 284a257c2531195b3b25138eb959325a9c434ef2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 05:00:40 +1100 Subject: [PATCH 19/24] feat: remove `enqueue_graph` routes/methods (#4922) This is totally extraneous - it's almost identical to `enqueue_batch`. 
--- invokeai/app/api/routers/session_queue.py | 19 --- .../session_queue/session_queue_base.py | 7 - .../session_queue/session_queue_common.py | 8 - .../session_queue/session_queue_sqlite.py | 28 --- .../listeners/anyEnqueued.ts | 8 +- .../listeners/controlNetImageProcessed.ts | 84 ++++----- .../listeners/upscaleRequested.ts | 32 ++-- .../hooks/useIsQueueMutationInProgress.ts | 6 - .../web/src/services/api/endpoints/queue.ts | 25 --- .../frontend/web/src/services/api/schema.d.ts | 160 +++++++----------- .../frontend/web/src/services/api/types.ts | 1 - 11 files changed, 126 insertions(+), 252 deletions(-) diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py index 7ecb0504a3..40f1f2213b 100644 --- a/invokeai/app/api/routers/session_queue.py +++ b/invokeai/app/api/routers/session_queue.py @@ -12,13 +12,11 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByBatchIDsResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, PruneResult, SessionQueueItem, SessionQueueItemDTO, SessionQueueStatus, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults from ..dependencies import ApiDependencies @@ -33,23 +31,6 @@ class SessionQueueAndProcessorStatus(BaseModel): processor: SessionProcessorStatus -@session_queue_router.post( - "/{queue_id}/enqueue_graph", - operation_id="enqueue_graph", - responses={ - 201: {"model": EnqueueGraphResult}, - }, -) -async def enqueue_graph( - queue_id: str = Path(description="The queue id to perform this operation on"), - graph: Graph = Body(description="The graph to enqueue"), - prepend: bool = Body(default=False, description="Whether or not to prepend this batch in the queue"), -) -> EnqueueGraphResult: - """Enqueues a graph for single execution.""" - - return ApiDependencies.invoker.services.session_queue.enqueue_graph(queue_id=queue_id, graph=graph, prepend=prepend) - - 
@session_queue_router.post( "/{queue_id}/enqueue_batch", operation_id="enqueue_batch", diff --git a/invokeai/app/services/session_queue/session_queue_base.py b/invokeai/app/services/session_queue/session_queue_base.py index b5272f1868..e0b6e4f528 100644 --- a/invokeai/app/services/session_queue/session_queue_base.py +++ b/invokeai/app/services/session_queue/session_queue_base.py @@ -9,7 +9,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByQueueIDResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, IsEmptyResult, IsFullResult, PruneResult, @@ -17,7 +16,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueItemDTO, SessionQueueStatus, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults @@ -29,11 +27,6 @@ class SessionQueueBase(ABC): """Dequeues the next session queue item.""" pass - @abstractmethod - def enqueue_graph(self, queue_id: str, graph: Graph, prepend: bool) -> EnqueueGraphResult: - """Enqueues a single graph for execution.""" - pass - @abstractmethod def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: """Enqueues all permutations of a batch for execution.""" diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py index 48e1da83b5..cbf2154b66 100644 --- a/invokeai/app/services/session_queue/session_queue_common.py +++ b/invokeai/app/services/session_queue/session_queue_common.py @@ -276,14 +276,6 @@ class EnqueueBatchResult(BaseModel): priority: int = Field(description="The priority of the enqueued batch") -class EnqueueGraphResult(BaseModel): - enqueued: int = Field(description="The total number of queue items enqueued") - requested: int = Field(description="The total number of queue items requested to be enqueued") - batch: Batch = Field(description="The batch that was enqueued") - 
priority: int = Field(description="The priority of the enqueued batch") - queue_item: SessionQueueItemDTO = Field(description="The queue item that was enqueued") - - class ClearResult(BaseModel): """Result of clearing the session queue""" diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 4daab9cdbc..7259a7bd0c 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -17,7 +17,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( CancelByQueueIDResult, ClearResult, EnqueueBatchResult, - EnqueueGraphResult, IsEmptyResult, IsFullResult, PruneResult, @@ -28,7 +27,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( calc_session_count, prepare_values_to_insert, ) -from invokeai.app.services.shared.graph import Graph from invokeai.app.services.shared.pagination import CursorPaginatedResults from invokeai.app.services.shared.sqlite import SqliteDatabase @@ -255,32 +253,6 @@ class SqliteSessionQueue(SessionQueueBase): ) return cast(Union[int, None], self.__cursor.fetchone()[0]) or 0 - def enqueue_graph(self, queue_id: str, graph: Graph, prepend: bool) -> EnqueueGraphResult: - enqueue_result = self.enqueue_batch(queue_id=queue_id, batch=Batch(graph=graph), prepend=prepend) - try: - self.__lock.acquire() - self.__cursor.execute( - """--sql - SELECT * - FROM session_queue - WHERE queue_id = ? - AND batch_id = ? 
- """, - (queue_id, enqueue_result.batch.batch_id), - ) - result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone()) - except Exception: - self.__conn.rollback() - raise - finally: - self.__lock.release() - if result is None: - raise SessionQueueItemNotFoundError(f"No queue item with batch id {enqueue_result.batch.batch_id}") - return EnqueueGraphResult( - **enqueue_result.model_dump(), - queue_item=SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)), - ) - def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult: try: self.__lock.acquire() diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts index ff11491b53..3f0e3342f9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts @@ -1,15 +1,9 @@ -import { isAnyOf } from '@reduxjs/toolkit'; import { queueApi } from 'services/api/endpoints/queue'; import { startAppListening } from '..'; -const matcher = isAnyOf( - queueApi.endpoints.enqueueBatch.matchFulfilled, - queueApi.endpoints.enqueueGraph.matchFulfilled -); - export const addAnyEnqueuedListener = () => { startAppListening({ - matcher, + matcher: queueApi.endpoints.enqueueBatch.matchFulfilled, effect: async (_, { dispatch, getState }) => { const { data } = queueApi.endpoints.getQueueStatus.select()(getState()); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts index f3db0ea65f..a454e5ca48 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts +++ 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -1,22 +1,22 @@ import { logger } from 'app/logging/logger'; import { parseify } from 'common/util/serialize'; +import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; import { - pendingControlImagesCleared, controlAdapterImageChanged, - selectControlAdapterById, controlAdapterProcessedImageChanged, + pendingControlImagesCleared, + selectControlAdapterById, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; import { SAVE_IMAGE } from 'features/nodes/util/graphBuilders/constants'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; import { queueApi } from 'services/api/endpoints/queue'; import { isImageOutput } from 'services/api/guards'; -import { Graph, ImageDTO } from 'services/api/types'; +import { BatchConfig, ImageDTO } from 'services/api/types'; import { socketInvocationComplete } from 'services/events/actions'; import { startAppListening } from '..'; -import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; -import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; export const addControlNetImageProcessedListener = () => { startAppListening({ @@ -37,41 +37,46 @@ export const addControlNetImageProcessedListener = () => { // ControlNet one-off procressing graph is just the processor node, no edges. // Also we need to grab the image. 
- const graph: Graph = { - nodes: { - [ca.processorNode.id]: { - ...ca.processorNode, - is_intermediate: true, - image: { image_name: ca.controlImage }, - }, - [SAVE_IMAGE]: { - id: SAVE_IMAGE, - type: 'save_image', - is_intermediate: true, - use_cache: false, + + const enqueueBatchArg: BatchConfig = { + prepend: true, + batch: { + graph: { + nodes: { + [ca.processorNode.id]: { + ...ca.processorNode, + is_intermediate: true, + image: { image_name: ca.controlImage }, + }, + [SAVE_IMAGE]: { + id: SAVE_IMAGE, + type: 'save_image', + is_intermediate: true, + use_cache: false, + }, + }, + edges: [ + { + source: { + node_id: ca.processorNode.id, + field: 'image', + }, + destination: { + node_id: SAVE_IMAGE, + field: 'image', + }, + }, + ], }, + runs: 1, }, - edges: [ - { - source: { - node_id: ca.processorNode.id, - field: 'image', - }, - destination: { - node_id: SAVE_IMAGE, - field: 'image', - }, - }, - ], }; + try { const req = dispatch( - queueApi.endpoints.enqueueGraph.initiate( - { graph, prepend: true }, - { - fixedCacheKey: 'enqueueGraph', - } - ) + queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, { + fixedCacheKey: 'enqueueBatch', + }) ); const enqueueResult = await req.unwrap(); req.reset(); @@ -83,8 +88,8 @@ export const addControlNetImageProcessedListener = () => { const [invocationCompleteAction] = await take( (action): action is ReturnType => socketInvocationComplete.match(action) && - action.payload.data.graph_execution_state_id === - enqueueResult.queue_item.session_id && + action.payload.data.queue_batch_id === + enqueueResult.batch.batch_id && action.payload.data.source_node_id === SAVE_IMAGE ); @@ -116,7 +121,10 @@ export const addControlNetImageProcessedListener = () => { ); } } catch (error) { - log.error({ graph: parseify(graph) }, t('queue.graphFailedToQueue')); + log.error( + { enqueueBatchArg: parseify(enqueueBatchArg) }, + t('queue.graphFailedToQueue') + ); // handle usage-related errors if (error instanceof Object) { diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts index c252f412a6..9ddcdc9701 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts @@ -6,7 +6,7 @@ import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { queueApi } from 'services/api/endpoints/queue'; import { startAppListening } from '..'; -import { ImageDTO } from 'services/api/types'; +import { BatchConfig, ImageDTO } from 'services/api/types'; import { createIsAllowedToUpscaleSelector } from 'features/parameters/hooks/useIsAllowedToUpscale'; export const upscaleRequested = createAction<{ imageDTO: ImageDTO }>( @@ -44,20 +44,23 @@ export const addUpscaleRequestedListener = () => { const { esrganModelName } = state.postprocessing; const { autoAddBoardId } = state.gallery; - const graph = buildAdHocUpscaleGraph({ - image_name, - esrganModelName, - autoAddBoardId, - }); + const enqueueBatchArg: BatchConfig = { + prepend: true, + batch: { + graph: buildAdHocUpscaleGraph({ + image_name, + esrganModelName, + autoAddBoardId, + }), + runs: 1, + }, + }; try { const req = dispatch( - queueApi.endpoints.enqueueGraph.initiate( - { graph, prepend: true }, - { - fixedCacheKey: 'enqueueGraph', - } - ) + queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, { + fixedCacheKey: 'enqueueBatch', + }) ); const enqueueResult = await req.unwrap(); @@ -67,7 +70,10 @@ export const addUpscaleRequestedListener = () => { t('queue.graphQueued') ); } catch (error) { - log.error({ graph: parseify(graph) }, t('queue.graphFailedToQueue')); + log.error( + { enqueueBatchArg: parseify(enqueueBatchArg) }, + t('queue.graphFailedToQueue') + ); // handle usage-related errors if (error instanceof Object) { 
diff --git a/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts b/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts index abb3967b92..9947c17086 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useIsQueueMutationInProgress.ts @@ -3,7 +3,6 @@ import { // useCancelByBatchIdsMutation, useClearQueueMutation, useEnqueueBatchMutation, - useEnqueueGraphMutation, usePruneQueueMutation, useResumeProcessorMutation, usePauseProcessorMutation, @@ -14,10 +13,6 @@ export const useIsQueueMutationInProgress = () => { useEnqueueBatchMutation({ fixedCacheKey: 'enqueueBatch', }); - const [_triggerEnqueueGraph, { isLoading: isLoadingEnqueueGraph }] = - useEnqueueGraphMutation({ - fixedCacheKey: 'enqueueGraph', - }); const [_triggerResumeProcessor, { isLoading: isLoadingResumeProcessor }] = useResumeProcessorMutation({ fixedCacheKey: 'resumeProcessor', @@ -44,7 +39,6 @@ export const useIsQueueMutationInProgress = () => { // }); return ( isLoadingEnqueueBatch || - isLoadingEnqueueGraph || isLoadingResumeProcessor || isLoadingPauseProcessor || isLoadingCancelQueue || diff --git a/invokeai/frontend/web/src/services/api/endpoints/queue.ts b/invokeai/frontend/web/src/services/api/endpoints/queue.ts index ab75964e89..d44e333850 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/queue.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/queue.ts @@ -83,30 +83,6 @@ export const queueApi = api.injectEndpoints({ } }, }), - enqueueGraph: build.mutation< - paths['/api/v1/queue/{queue_id}/enqueue_graph']['post']['responses']['201']['content']['application/json'], - paths['/api/v1/queue/{queue_id}/enqueue_graph']['post']['requestBody']['content']['application/json'] - >({ - query: (arg) => ({ - url: `queue/${$queueId.get()}/enqueue_graph`, - body: arg, - method: 'POST', - }), - invalidatesTags: [ - 'SessionQueueStatus', - 
'CurrentSessionQueueItem', - 'NextSessionQueueItem', - ], - onQueryStarted: async (arg, api) => { - const { dispatch, queryFulfilled } = api; - try { - await queryFulfilled; - resetListQueryData(dispatch); - } catch { - // no-op - } - }, - }), resumeProcessor: build.mutation< paths['/api/v1/queue/{queue_id}/processor/resume']['put']['responses']['200']['content']['application/json'], void @@ -341,7 +317,6 @@ export const queueApi = api.injectEndpoints({ export const { useCancelByBatchIdsMutation, - useEnqueueGraphMutation, useEnqueueBatchMutation, usePauseProcessorMutation, useResumeProcessorMutation, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index d4678dc03b..6bc54f0e35 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -5,6 +5,13 @@ export type paths = { + "/api/v1/sessions/{session_id}": { + /** + * Get Session + * @description Gets a session + */ + get: operations["get_session"]; + }; "/api/v1/utilities/dynamicprompts": { /** * Parse Dynamicprompts @@ -275,13 +282,6 @@ export type paths = { */ get: operations["get_invocation_cache_status"]; }; - "/api/v1/queue/{queue_id}/enqueue_graph": { - /** - * Enqueue Graph - * @description Enqueues a graph for single execution. 
- */ - post: operations["enqueue_graph"]; - }; "/api/v1/queue/{queue_id}/enqueue_batch": { /** * Enqueue Batch @@ -800,17 +800,6 @@ export type components = { */ prepend?: boolean; }; - /** Body_enqueue_graph */ - Body_enqueue_graph: { - /** @description The graph to enqueue */ - graph: components["schemas"]["Graph"]; - /** - * Prepend - * @description Whether or not to prepend this batch in the queue - * @default false - */ - prepend?: boolean; - }; /** Body_import_model */ Body_import_model: { /** @@ -1897,7 +1886,7 @@ export type components = { * Created By * @description The name of the creator of the image */ - created_by: string | null; + created_by?: string | null; /** * Positive Prompt * @description The positive prompt parameter @@ -2476,28 +2465,6 @@ export type components = { */ priority: number; }; - /** EnqueueGraphResult */ - EnqueueGraphResult: { - /** - * Enqueued - * @description The total number of queue items enqueued - */ - enqueued: number; - /** - * Requested - * @description The total number of queue items requested to be enqueued - */ - requested: number; - /** @description The batch that was enqueued */ - batch: components["schemas"]["Batch"]; - /** - * Priority - * @description The priority of the enqueued batch - */ - priority: number; - /** @description The queue item that was enqueued */ - queue_item: components["schemas"]["SessionQueueItemDTO"]; - }; /** * FaceIdentifier * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. 
@@ -3035,7 +3002,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | 
components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorInvocation"] | 
components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | 
components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MainModelLoaderInvocation"]; + [key: string]: components["schemas"]["IntegerInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | 
components["schemas"]["GraphInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | 
components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["InfillColorInvocation"]; }; /** * Edges @@ -3072,7 +3039,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["String2Output"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | 
components["schemas"]["SchedulerOutput"]; + [key: string]: components["schemas"]["String2Output"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["StringPosNegOutput"]; }; /** * Errors @@ -9138,6 +9105,18 @@ export type components = { /** Ui Order */ ui_order: number | null; }; + /** + * ControlNetModelFormat + * 
@description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. @@ -9151,41 +9130,29 @@ export type components = { */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** - * StableDiffusion1ModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + IPAdapterModelFormat: "invokeai"; /** * CLIPVisionModelFormat * @description An enumeration. * @enum {string} */ CLIPVisionModelFormat: "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * IPAdapterModelFormat - * @description An enumeration. - * @enum {string} - */ - IPAdapterModelFormat: "invokeai"; /** * T2IAdapterModelFormat * @description An enumeration. * @enum {string} */ T2IAdapterModelFormat: "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -9200,6 +9167,36 @@ export type external = Record; export type operations = { + /** + * Get Session + * @description Gets a session + */ + get_session: { + parameters: { + path: { + /** @description The id of the session to get */ + session_id: string; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + content: { + "application/json": components["schemas"]["GraphExecutionState"]; + }; + }; + /** @description Session not found */ + 404: { + content: never; + }; + /** @description Validation Error */ + 422: { + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; /** * Parse Dynamicprompts * @description Creates a batch process @@ -10309,43 +10306,6 @@ export type operations = { }; }; }; - /** - * Enqueue Graph - * @description Enqueues a graph for single execution. - */ - enqueue_graph: { - parameters: { - path: { - /** @description The queue id to perform this operation on */ - queue_id: string; - }; - }; - requestBody: { - content: { - "application/json": components["schemas"]["Body_enqueue_graph"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - content: { - "application/json": components["schemas"]["EnqueueGraphResult"]; - }; - }; - /** @description Created */ - 201: { - content: { - "application/json": components["schemas"]["EnqueueGraphResult"]; - }; - }; - /** @description Validation Error */ - 422: { - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; /** * Enqueue Batch * @description Processes a batch and enqueues the output graphs for execution. 
diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 6fda849b89..63617a4eb5 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -26,7 +26,6 @@ export type BatchConfig = paths['/api/v1/queue/{queue_id}/enqueue_batch']['post']['requestBody']['content']['application/json']; export type EnqueueBatchResult = components['schemas']['EnqueueBatchResult']; -export type EnqueueGraphResult = components['schemas']['EnqueueGraphResult']; /** * This is an unsafe type; the object inside is not guaranteed to be valid. From 975ba6b74f51394f78327431062c960e676cf9a9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 08:43:56 +1100 Subject: [PATCH 20/24] fix(ui): use pidi processor for sketch --- .../web/src/features/controlAdapters/store/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts b/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts index c35847d323..db2311f3f5 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/constants.ts @@ -246,7 +246,7 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: { mlsd: 'mlsd_image_processor', depth: 'midas_depth_image_processor', bae: 'normalbae_image_processor', - sketch: 'lineart_image_processor', + sketch: 'pidi_image_processor', scribble: 'lineart_image_processor', lineart: 'lineart_image_processor', lineart_anime: 'lineart_anime_image_processor', From 252c9a5f5ab3d3301f8ae912137135a9b66b2c1a Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 17 Oct 2023 16:50:52 +1100 Subject: [PATCH 21/24] fix(backend): fix nsfw/watermarker util types --- 
.../backend/image_util/invisible_watermark.py | 6 ++--- invokeai/backend/image_util/safety_checker.py | 26 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/invokeai/backend/image_util/invisible_watermark.py b/invokeai/backend/image_util/invisible_watermark.py index 3e8604f9c3..37b3ca918c 100644 --- a/invokeai/backend/image_util/invisible_watermark.py +++ b/invokeai/backend/image_util/invisible_watermark.py @@ -20,12 +20,12 @@ class InvisibleWatermark: """ @classmethod - def invisible_watermark_available(self) -> bool: + def invisible_watermark_available(cls) -> bool: return config.invisible_watermark @classmethod - def add_watermark(self, image: Image, watermark_text: str) -> Image: - if not self.invisible_watermark_available(): + def add_watermark(cls, image: Image.Image, watermark_text: str) -> Image.Image: + if not cls.invisible_watermark_available(): return image logger.debug(f'Applying invisible watermark "{watermark_text}"') bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR) diff --git a/invokeai/backend/image_util/safety_checker.py b/invokeai/backend/image_util/safety_checker.py index fd1f05f10e..b9649925e1 100644 --- a/invokeai/backend/image_util/safety_checker.py +++ b/invokeai/backend/image_util/safety_checker.py @@ -26,8 +26,8 @@ class SafetyChecker: tried_load: bool = False @classmethod - def _load_safety_checker(self): - if self.tried_load: + def _load_safety_checker(cls): + if cls.tried_load: return if config.nsfw_checker: @@ -35,31 +35,31 @@ class SafetyChecker: from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from transformers import AutoFeatureExtractor - self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / CHECKER_PATH) - self.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH) + cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / 
CHECKER_PATH) + cls.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH) logger.info("NSFW checker initialized") except Exception as e: logger.warning(f"Could not load NSFW checker: {str(e)}") else: logger.info("NSFW checker loading disabled") - self.tried_load = True + cls.tried_load = True @classmethod - def safety_checker_available(self) -> bool: - self._load_safety_checker() - return self.safety_checker is not None + def safety_checker_available(cls) -> bool: + cls._load_safety_checker() + return cls.safety_checker is not None @classmethod - def has_nsfw_concept(self, image: Image) -> bool: - if not self.safety_checker_available(): + def has_nsfw_concept(cls, image: Image.Image) -> bool: + if not cls.safety_checker_available(): return False device = choose_torch_device() - features = self.feature_extractor([image], return_tensors="pt") + features = cls.feature_extractor([image], return_tensors="pt") features.to(device) - self.safety_checker.to(device) + cls.safety_checker.to(device) x_image = np.array(image).astype(np.float32) / 255.0 x_image = x_image[None].transpose(0, 3, 1, 2) with SilenceWarnings(): - checked_image, has_nsfw_concept = self.safety_checker(images=x_image, clip_input=features.pixel_values) + checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values) return has_nsfw_concept[0] From d27392cc2d9c4ac2ab2b9dd1703cc969daf85884 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 17 Oct 2023 12:59:48 -0400 Subject: [PATCH 22/24] remove all references to CLI --- docs/CHANGELOG.md | 4 +- CODE_OF_CONDUCT.md => docs/CODE_OF_CONDUCT.md | 0 .../contribution_guides/development.md | 2 +- docs/deprecated/CLI.md | 4 +- docs/{other => deprecated}/TRANSLATION.md | 0 docs/{features => deprecated}/VARIATIONS.md | 2 +- docs/features/CONCEPTS.md | 5 +- docs/features/CONFIGURATION.md | 2 +- docs/features/CONTROLNET.md | 6 +- docs/features/MODEL_MERGING.md | 7 +- 
docs/features/UTILITIES.md | 10 +- docs/features/index.md | 2 +- docs/index.md | 7 +- docs/installation/050_INSTALLING_MODELS.md | 2 +- .../deprecated_documentation/INSTALL_LINUX.md | 4 +- .../deprecated_documentation/INSTALL_MAC.md | 2 +- .../INSTALL_SOURCE.md | 2 +- .../INSTALL_WINDOWS.md | 4 +- installer/templates/invoke.bat.in | 40 +- installer/templates/invoke.sh.in | 61 +-- invokeai/app/cli/__init__.py | 0 invokeai/app/cli/commands.py | 312 ----------- invokeai/app/cli/completer.py | 171 ------- invokeai/app/cli_app.py | 484 ------------------ mkdocs.yml | 9 +- pyproject.toml | 3 +- 26 files changed, 86 insertions(+), 1059 deletions(-) rename CODE_OF_CONDUCT.md => docs/CODE_OF_CONDUCT.md (100%) rename docs/{other => deprecated}/TRANSLATION.md (100%) rename docs/{features => deprecated}/VARIATIONS.md (97%) delete mode 100644 invokeai/app/cli/__init__.py delete mode 100644 invokeai/app/cli/commands.py delete mode 100644 invokeai/app/cli/completer.py delete mode 100644 invokeai/app/cli_app.py diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index ca765b3ca6..24bd5ad7dd 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -488,7 +488,7 @@ sections describe what's new for InvokeAI. - A choice of installer scripts that automate installation and configuration. See - [Installation](installation/index.md). + [Installation](installation/INSTALLATION.md). - A streamlined manual installation process that works for both Conda and PIP-only installs. See [Manual Installation](installation/020_INSTALL_MANUAL.md). @@ -657,7 +657,7 @@ sections describe what's new for InvokeAI. 
## v1.13 (3 September 2022) -- Support image variations (see [VARIATIONS](features/VARIATIONS.md) +- Support image variations (see [VARIATIONS](deprecated/VARIATIONS.md) ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers) - Supports a Google Colab notebook for a standalone server running on Google diff --git a/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md similarity index 100% rename from CODE_OF_CONDUCT.md rename to docs/CODE_OF_CONDUCT.md diff --git a/docs/contributing/contribution_guides/development.md b/docs/contributing/contribution_guides/development.md index 086fd6e90d..2f50d7f579 100644 --- a/docs/contributing/contribution_guides/development.md +++ b/docs/contributing/contribution_guides/development.md @@ -45,5 +45,5 @@ For backend related work, please reach out to **@blessedcoolant**, **@lstein**, ## **What does the Code of Conduct mean for me?** -Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code. +Our [Code of Conduct](../../CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code. 
diff --git a/docs/deprecated/CLI.md b/docs/deprecated/CLI.md index eaa215c8dd..b40aeffc37 100644 --- a/docs/deprecated/CLI.md +++ b/docs/deprecated/CLI.md @@ -211,8 +211,8 @@ Here are the invoke> command that apply to txt2img: | `--facetool ` | `-ft ` | `-ft gfpgan` | Select face restoration algorithm to use: gfpgan, codeformer | | `--codeformer_fidelity` | `-cf ` | `0.75` | Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality | | `--save_original` | `-save_orig` | `False` | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. | -| `--variation ` | `-v` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S` and `-n` to generate a series a riffs on a starting image. See [Variations](../features/VARIATIONS.md). | -| `--with_variations ` | | `None` | Combine two or more variations. See [Variations](../features/VARIATIONS.md) for now to use this. | +| `--variation ` | `-v` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S` and `-n` to generate a series a riffs on a starting image. See [Variations](VARIATIONS.md). | +| `--with_variations ` | | `None` | Combine two or more variations. See [Variations](VARIATIONS.md) for now to use this. | | `--save_intermediates ` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory | | `--h_symmetry_time_pct ` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) | | `--v_symmetry_time_pct ` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. 
(Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) | diff --git a/docs/other/TRANSLATION.md b/docs/deprecated/TRANSLATION.md similarity index 100% rename from docs/other/TRANSLATION.md rename to docs/deprecated/TRANSLATION.md diff --git a/docs/features/VARIATIONS.md b/docs/deprecated/VARIATIONS.md similarity index 97% rename from docs/features/VARIATIONS.md rename to docs/deprecated/VARIATIONS.md index e6e21490c8..0c09b71836 100644 --- a/docs/features/VARIATIONS.md +++ b/docs/deprecated/VARIATIONS.md @@ -126,6 +126,6 @@ amounts of image-to-image variation even when the seed is fixed and the `-v` argument is very low. Others are more deterministic. Feel free to experiment until you find the combination that you like. -Also be aware of the [Perlin Noise](OTHER.md#thresholding-and-perlin-noise-initialization-options) +Also be aware of the [Perlin Noise](../features/OTHER.md#thresholding-and-perlin-noise-initialization-options) feature, which provides another way of introducing variability into your image generation requests. diff --git a/docs/features/CONCEPTS.md b/docs/features/CONCEPTS.md index df9ee5bd26..5f3d2d961f 100644 --- a/docs/features/CONCEPTS.md +++ b/docs/features/CONCEPTS.md @@ -28,8 +28,9 @@ by placing them in the designated directory for the compatible model type ### An Example -Here are a few examples to illustrate how it works. All these images were -generated using the command-line client and the Stable Diffusion 1.5 model: +Here are a few examples to illustrate how it works. 
All these images +were generated using the legacy command-line client and the Stable +Diffusion 1.5 model: | Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> | | :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: | diff --git a/docs/features/CONFIGURATION.md b/docs/features/CONFIGURATION.md index cfd65f8a61..f83caf522d 100644 --- a/docs/features/CONFIGURATION.md +++ b/docs/features/CONFIGURATION.md @@ -82,7 +82,7 @@ format of YAML files can be found [here](https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/). You can fix a broken `invokeai.yaml` by deleting it and running the -configuration script again -- option [7] in the launcher, "Re-run the +configuration script again -- option [6] in the launcher, "Re-run the configure script". #### Reading Environment Variables diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md index 8284ddf75d..d287e6cb19 100644 --- a/docs/features/CONTROLNET.md +++ b/docs/features/CONTROLNET.md @@ -46,7 +46,7 @@ Diffuser-style ControlNet models are available at HuggingFace (http://huggingface.co) and accessed via their repo IDs (identifiers in the format "author/modelname"). The easiest way to install them is to use the InvokeAI model installer application. Use the -`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate +`invoke.sh`/`invoke.bat` launcher to select item [4] and then navigate to the CONTROLNETS section. Select the models you wish to install and press "APPLY CHANGES". You may also enter additional HuggingFace repo_ids in the "Additional models" textbox: @@ -145,8 +145,8 @@ Additionally, each ControlNet section can be expanded in order to manipulate set #### Installation There are several ways to install IP-Adapter models with an existing InvokeAI installation: -1. 
Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models. -2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. +1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [4] to download models. +2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](https://www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models. 3. **Advanced -- Not recommended ** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders shouid be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the relevant `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder. 
#### Using IP-Adapter diff --git a/docs/features/MODEL_MERGING.md b/docs/features/MODEL_MERGING.md index 6adf4db16a..e384662ef5 100644 --- a/docs/features/MODEL_MERGING.md +++ b/docs/features/MODEL_MERGING.md @@ -16,9 +16,10 @@ Model Merging can be be done by navigating to the Model Manager and clicking the display all the diffusers-style models that InvokeAI knows about. If you do not see the model you are looking for, then it is probably a legacy checkpoint model and needs to be converted using the - `invoke` command-line client and its `!optimize` command. You - must select at least two models to merge. The third can be left at - "None" if you desire. + "Convert" option in the Web-based Model Manager tab. + + You must select at least two models to merge. The third can be left + at "None" if you desire. * Alpha: This is the ratio to use when combining models. It ranges from 0 to 1. The higher the value, the more weight is given to the diff --git a/docs/features/UTILITIES.md b/docs/features/UTILITIES.md index a73118d55a..2d62fe3a79 100644 --- a/docs/features/UTILITIES.md +++ b/docs/features/UTILITIES.md @@ -8,7 +8,7 @@ title: Command-line Utilities InvokeAI comes with several scripts that are accessible via the command line. To access these commands, start the "developer's -console" from the launcher (`invoke.bat` menu item [8]). Users who are +console" from the launcher (`invoke.bat` menu item [7]). Users who are familiar with Python can alternatively activate InvokeAI's virtual environment (typically, but not necessarily `invokeai/.venv`). @@ -34,7 +34,7 @@ invokeai-web --ram 7 ## **invokeai-merge** -This is the model merge script, the same as launcher option [4]. Call +This is the model merge script, the same as launcher option [3]. Call it with the `--gui` command-line argument to start the interactive console-based GUI. 
Alternatively, you can run it non-interactively using command-line arguments as illustrated in the example below which @@ -48,7 +48,7 @@ invokeai-merge --force --base-model sd-1 --models stable-diffusion-1.5 inkdiffus ## **invokeai-ti** This is the textual inversion training script that is run by launcher -option [3]. Call it with `--gui` to run the interactive console-based +option [2]. Call it with `--gui` to run the interactive console-based front end. It can also be run non-interactively. It has about a zillion arguments, but a typical training session can be launched with: @@ -68,7 +68,7 @@ in Windows). ## **invokeai-install** This is the console-based model install script that is run by launcher -option [5]. If called without arguments, it will launch the +option [4]. If called without arguments, it will launch the interactive console-based interface. It can also be used non-interactively to list, add and remove models as shown by these examples: @@ -148,7 +148,7 @@ launch the web server against it with `invokeai-web --root InvokeAI-New`. ## **invokeai-update** This is the interactive console-based script that is run by launcher -menu item [9] to update to a new version of InvokeAI. It takes no +menu item [8] to update to a new version of InvokeAI. It takes no command-line arguments. ## **invokeai-metadata** diff --git a/docs/features/index.md b/docs/features/index.md index bd37366314..6315b20ca5 100644 --- a/docs/features/index.md +++ b/docs/features/index.md @@ -28,7 +28,7 @@ Learn how to install and use ControlNet models for fine control over image output. ### * [Image-to-Image Guide](IMG2IMG.md) -Use a seed image to build new creations in the CLI. +Use a seed image to build new creations. ## Model Management diff --git a/docs/index.md b/docs/index.md index 9a426e5684..8c9ed5b7f8 100644 --- a/docs/index.md +++ b/docs/index.md @@ -143,7 +143,6 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM. 
### Prompt Engineering - [Prompt Syntax](features/PROMPTS.md) -- [Generating Variations](features/VARIATIONS.md) ### InvokeAI Configuration - [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md) @@ -166,10 +165,8 @@ still a work in progress, but coming soon. ### Command-Line Interface Retired -The original "invokeai" command-line interface has been retired. The -`invokeai` command will now launch a new command-line client that can -be used by developers to create and test nodes. It is not intended to -be used for routine image generation or manipulation. +All "invokeai" command-line interfaces have been retired as of version +3.4. To launch the Web GUI from the command-line, use the command `invokeai-web` rather than the traditional `invokeai --web`. diff --git a/docs/installation/050_INSTALLING_MODELS.md b/docs/installation/050_INSTALLING_MODELS.md index d455d2146f..5333e2aa88 100644 --- a/docs/installation/050_INSTALLING_MODELS.md +++ b/docs/installation/050_INSTALLING_MODELS.md @@ -84,7 +84,7 @@ InvokeAI root directory's `autoimport` folder. ### Installation via `invokeai-model-install` -From the `invoke` launcher, choose option [5] "Download and install +From the `invoke` launcher, choose option [4] "Download and install models." This will launch the same script that prompted you to select models at install time. You can use this to add models that you skipped the first time around. It is all right to specify a model that diff --git a/docs/installation/deprecated_documentation/INSTALL_LINUX.md b/docs/installation/deprecated_documentation/INSTALL_LINUX.md index 1e66698ec2..97060f85ad 100644 --- a/docs/installation/deprecated_documentation/INSTALL_LINUX.md +++ b/docs/installation/deprecated_documentation/INSTALL_LINUX.md @@ -79,7 +79,7 @@ title: Manual Installation, Linux and obtaining an access token for downloading. It will then download and install the weights files for you. 
- Please look [here](../INSTALL_MANUAL.md) for a manual process for doing + Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing the same thing. 7. Start generating images! @@ -112,7 +112,7 @@ title: Manual Installation, Linux To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. 8. Subsequently, to relaunch the script, be sure to run "conda activate diff --git a/docs/installation/deprecated_documentation/INSTALL_MAC.md b/docs/installation/deprecated_documentation/INSTALL_MAC.md index 7a3c5b564f..dea3c329a7 100644 --- a/docs/installation/deprecated_documentation/INSTALL_MAC.md +++ b/docs/installation/deprecated_documentation/INSTALL_MAC.md @@ -150,7 +150,7 @@ will do our best to help. To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. --- diff --git a/docs/installation/deprecated_documentation/INSTALL_SOURCE.md b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md index 2b1b750fbf..b71cd68ab7 100644 --- a/docs/installation/deprecated_documentation/INSTALL_SOURCE.md +++ b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md @@ -128,7 +128,7 @@ python scripts/invoke.py --web --max_load_models=3 \ ``` These options are described in detail in the -[Command-Line Interface](../../features/CLI.md) documentation. +[Command-Line Interface](../../deprecated/CLI.md) documentation. 
## Troubleshooting diff --git a/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md b/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md index 19acb832e4..9536f09db2 100644 --- a/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md +++ b/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md @@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan obtaining an access token for downloading. It will then download and install the weights files for you. - Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the + Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing the same thing. 8. Start generating images! @@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan To use an alternative model you may invoke the `!switch` command in the CLI, or pass `--model ` during `invoke.py` launch for either the CLI or the Web UI. See [Command Line - Client](../../features/CLI.md#model-selection-and-importation). The + Client](../../deprecated/CLI.md#model-selection-and-importation). The model names are defined in `configs/models.yaml`. 9. Subsequently, to relaunch the script, first activate the Anaconda diff --git a/installer/templates/invoke.bat.in b/installer/templates/invoke.bat.in index 227091b33a..ee6d56fc56 100644 --- a/installer/templates/invoke.bat.in +++ b/installer/templates/invoke.bat.in @@ -9,41 +9,37 @@ set INVOKEAI_ROOT=. :start echo Desired action: echo 1. Generate images with the browser-based interface -echo 2. Explore InvokeAI nodes using a command-line interface -echo 3. Run textual inversion training -echo 4. Merge models (diffusers type only) -echo 5. Download and install models -echo 6. Change InvokeAI startup options -echo 7. Re-run the configure script to fix a broken install or to complete a major upgrade -echo 8. Open the developer console -echo 9. Update InvokeAI -echo 10. 
Run the InvokeAI image database maintenance script -echo 11. Command-line help +echo 2. Run textual inversion training +echo 3. Merge models (diffusers type only) +echo 4. Download and install models +echo 5. Change InvokeAI startup options +echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade +echo 7. Open the developer console +echo 8. Update InvokeAI +echo 9. Run the InvokeAI image database maintenance script +echo 10. Command-line help echo Q - Quit -set /P choice="Please enter 1-11, Q: [1] " +set /P choice="Please enter 1-10, Q: [1] " if not defined choice set choice=1 IF /I "%choice%" == "1" ( echo Starting the InvokeAI browser-based UI.. python .venv\Scripts\invokeai-web.exe %* ) ELSE IF /I "%choice%" == "2" ( - echo Starting the InvokeAI command-line.. - python .venv\Scripts\invokeai.exe %* -) ELSE IF /I "%choice%" == "3" ( echo Starting textual inversion training.. python .venv\Scripts\invokeai-ti.exe --gui -) ELSE IF /I "%choice%" == "4" ( +) ELSE IF /I "%choice%" == "3" ( echo Starting model merging script.. python .venv\Scripts\invokeai-merge.exe --gui -) ELSE IF /I "%choice%" == "5" ( +) ELSE IF /I "%choice%" == "4" ( echo Running invokeai-model-install... python .venv\Scripts\invokeai-model-install.exe -) ELSE IF /I "%choice%" == "6" ( +) ELSE IF /I "%choice%" == "5" ( echo Running invokeai-configure... python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models -) ELSE IF /I "%choice%" == "7" ( +) ELSE IF /I "%choice%" == "6" ( echo Running invokeai-configure... 
python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight -) ELSE IF /I "%choice%" == "8" ( +) ELSE IF /I "%choice%" == "7" ( echo Developer Console echo Python command is: where python @@ -55,13 +51,13 @@ IF /I "%choice%" == "1" ( echo ************************* echo *** Type `exit` to quit this shell and deactivate the Python virtual environment *** call cmd /k -) ELSE IF /I "%choice%" == "9" ( +) ELSE IF /I "%choice%" == "8" ( echo Running invokeai-update... python -m invokeai.frontend.install.invokeai_update -) ELSE IF /I "%choice%" == "10" ( +) ELSE IF /I "%choice%" == "9" ( echo Running the db maintenance script... python .venv\Scripts\invokeai-db-maintenance.exe -) ELSE IF /I "%choice%" == "11" ( +) ELSE IF /I "%choice%" == "10" ( echo Displaying command line help... python .venv\Scripts\invokeai-web.exe --help %* pause diff --git a/installer/templates/invoke.sh.in b/installer/templates/invoke.sh.in index 6cf6967608..3230c9f442 100644 --- a/installer/templates/invoke.sh.in +++ b/installer/templates/invoke.sh.in @@ -58,52 +58,47 @@ do_choice() { invokeai-web $PARAMS ;; 2) - clear - printf "Explore InvokeAI nodes using a command-line interface\n" - invokeai $PARAMS - ;; - 3) clear printf "Textual inversion training\n" invokeai-ti --gui $PARAMS ;; - 4) + 3) clear printf "Merge models (diffusers type only)\n" invokeai-merge --gui $PARAMS ;; - 5) + 4) clear printf "Download and install models\n" invokeai-model-install --root ${INVOKEAI_ROOT} ;; - 6) + 5) clear printf "Change InvokeAI startup options\n" invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models ;; - 7) + 6) clear printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n" invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights ;; - 8) + 7) clear printf "Open the developer console\n" file_name=$(basename "${BASH_SOURCE[0]}") bash --init-file "$file_name" ;; - 9) + 8) clear printf "Update InvokeAI\n" python 
-m invokeai.frontend.install.invokeai_update ;; - 10) + 9) clear printf "Running the db maintenance script\n" invokeai-db-maintenance --root ${INVOKEAI_ROOT} ;; - 11) + 10) clear printf "Command-line help\n" invokeai-web --help @@ -121,16 +116,15 @@ do_choice() { do_dialog() { options=( 1 "Generate images with a browser-based interface" - 2 "Explore InvokeAI nodes using a command-line interface" - 3 "Textual inversion training" - 4 "Merge models (diffusers type only)" - 5 "Download and install models" - 6 "Change InvokeAI startup options" - 7 "Re-run the configure script to fix a broken install or to complete a major upgrade" - 8 "Open the developer console" - 9 "Update InvokeAI" - 10 "Run the InvokeAI image database maintenance script" - 11 "Command-line help" + 2 "Textual inversion training" + 3 "Merge models (diffusers type only)" + 4 "Download and install models" + 5 "Change InvokeAI startup options" + 6 "Re-run the configure script to fix a broken install or to complete a major upgrade" + 7 "Open the developer console" + 8 "Update InvokeAI" + 9 "Run the InvokeAI image database maintenance script" + 10 "Command-line help" ) choice=$(dialog --clear \ @@ -155,18 +149,17 @@ do_line_input() { printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. 
**\n\n" printf "What would you like to do?\n" printf "1: Generate images using the browser-based interface\n" - printf "2: Explore InvokeAI nodes using the command-line interface\n" - printf "3: Run textual inversion training\n" - printf "4: Merge models (diffusers type only)\n" - printf "5: Download and install models\n" - printf "6: Change InvokeAI startup options\n" - printf "7: Re-run the configure script to fix a broken install\n" - printf "8: Open the developer console\n" - printf "9: Update InvokeAI\n" - printf "10: Run the InvokeAI image database maintenance script\n" - printf "11: Command-line help\n" + printf "2: Run textual inversion training\n" + printf "3: Merge models (diffusers type only)\n" + printf "4: Download and install models\n" + printf "5: Change InvokeAI startup options\n" + printf "6: Re-run the configure script to fix a broken install\n" + printf "7: Open the developer console\n" + printf "8: Update InvokeAI\n" + printf "9: Run the InvokeAI image database maintenance script\n" + printf "10: Command-line help\n" printf "Q: Quit\n\n" - read -p "Please enter 1-11, Q: [1] " yn + read -p "Please enter 1-10, Q: [1] " yn choice=${yn:='1'} do_choice $choice clear diff --git a/invokeai/app/cli/__init__.py b/invokeai/app/cli/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py deleted file mode 100644 index c21c6315ed..0000000000 --- a/invokeai/app/cli/commands.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -import argparse -from abc import ABC, abstractmethod -from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints - -import matplotlib.pyplot as plt -import networkx as nx -from pydantic import BaseModel, Field - -import invokeai.backend.util.logging as logger - -from ..invocations.baseinvocation import BaseInvocation -from ..invocations.image import ImageField -from 
..services.graph import Edge, GraphExecutionState, LibraryGraph -from ..services.invoker import Invoker - - -def add_field_argument(command_parser, name: str, field, default_override=None): - default = ( - default_override - if default_override is not None - else field.default - if field.default_factory is None - else field.default_factory() - ) - if get_origin(field.annotation) == Literal: - allowed_values = get_args(field.annotation) - allowed_types = set() - for val in allowed_values: - allowed_types.add(type(val)) - allowed_types_list = list(allowed_types) - field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore - - command_parser.add_argument( - f"--{name}", - dest=name, - type=field_type, - default=default, - choices=allowed_values, - help=field.description, - ) - else: - command_parser.add_argument( - f"--{name}", - dest=name, - type=field.annotation, - default=default, - help=field.description, - ) - - -def add_parsers( - subparsers, - commands: list[type], - command_field: str = "type", - exclude_fields: list[str] = ["id", "type"], - add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None, -): - """Adds parsers for each command to the subparsers""" - - # Create subparsers for each command - for command in commands: - hints = get_type_hints(command) - cmd_name = get_args(hints[command_field])[0] - command_parser = subparsers.add_parser(cmd_name, help=command.__doc__) - - if add_arguments is not None: - add_arguments(command_parser) - - # Convert all fields to arguments - fields = command.__fields__ # type: ignore - for name, field in fields.items(): - if name in exclude_fields: - continue - - add_field_argument(command_parser, name, field) - - -def add_graph_parsers( - subparsers, graphs: list[LibraryGraph], add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None -): - for graph in graphs: - command_parser = subparsers.add_parser(graph.name, 
help=graph.description) - - if add_arguments is not None: - add_arguments(command_parser) - - # Add arguments for inputs - for exposed_input in graph.exposed_inputs: - node = graph.graph.get_node(exposed_input.node_path) - field = node.__fields__[exposed_input.field] - default_override = getattr(node, exposed_input.field) - add_field_argument(command_parser, exposed_input.alias, field, default_override) - - -class CliContext: - invoker: Invoker - session: GraphExecutionState - parser: argparse.ArgumentParser - defaults: dict[str, Any] - graph_nodes: dict[str, str] - nodes_added: list[str] - - def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser): - self.invoker = invoker - self.session = session - self.parser = parser - self.defaults = dict() - self.graph_nodes = dict() - self.nodes_added = list() - - def get_session(self): - self.session = self.invoker.services.graph_execution_manager.get(self.session.id) - return self.session - - def reset(self): - self.session = self.invoker.create_execution_state() - self.graph_nodes = dict() - self.nodes_added = list() - # Leave defaults unchanged - - def add_node(self, node: BaseInvocation): - self.get_session() - self.session.graph.add_node(node) - self.nodes_added.append(node.id) - self.invoker.services.graph_execution_manager.set(self.session) - - def add_edge(self, edge: Edge): - self.get_session() - self.session.add_edge(edge) - self.invoker.services.graph_execution_manager.set(self.session) - - -class ExitCli(Exception): - """Exception to exit the CLI""" - - pass - - -class BaseCommand(ABC, BaseModel): - """A CLI command""" - - # All commands must include a type name like this: - - @classmethod - def get_all_subclasses(cls): - subclasses = [] - toprocess = [cls] - while len(toprocess) > 0: - next = toprocess.pop(0) - next_subclasses = next.__subclasses__() - subclasses.extend(next_subclasses) - toprocess.extend(next_subclasses) - return subclasses - - @classmethod - def 
get_commands(cls): - return tuple(BaseCommand.get_all_subclasses()) - - @classmethod - def get_commands_map(cls): - # Get the type strings out of the literals and into a dictionary - return dict(map(lambda t: (get_args(get_type_hints(t)["type"])[0], t), BaseCommand.get_all_subclasses())) - - @abstractmethod - def run(self, context: CliContext) -> None: - """Run the command. Raise ExitCli to exit.""" - pass - - -class ExitCommand(BaseCommand): - """Exits the CLI""" - - type: Literal["exit"] = "exit" - - def run(self, context: CliContext) -> None: - raise ExitCli() - - -class HelpCommand(BaseCommand): - """Shows help""" - - type: Literal["help"] = "help" - - def run(self, context: CliContext) -> None: - context.parser.print_help() - - -def get_graph_execution_history( - graph_execution_state: GraphExecutionState, -) -> Iterable[str]: - """Gets the history of fully-executed invocations for a graph execution""" - return (n for n in reversed(graph_execution_state.executed_history) if n in graph_execution_state.graph.nodes) - - -def get_invocation_command(invocation) -> str: - fields = invocation.__fields__.items() - type_hints = get_type_hints(type(invocation)) - command = [invocation.type] - for name, field in fields: - if name in ["id", "type"]: - continue - - # TODO: add links - - # Skip image fields when serializing command - type_hint = type_hints.get(name) or None - if type_hint is ImageField or ImageField in get_args(type_hint): - continue - - field_value = getattr(invocation, name) - field_default = field.default - if field_value != field_default: - if type_hint is str or str in get_args(type_hint): - command.append(f'--{name} "{field_value}"') - else: - command.append(f"--{name} {field_value}") - - return " ".join(command) - - -class HistoryCommand(BaseCommand): - """Shows the invocation history""" - - type: Literal["history"] = "history" - - # Inputs - # fmt: off - count: int = Field(default=5, gt=0, description="The number of history entries to show") - # 
fmt: on - - def run(self, context: CliContext) -> None: - history = list(get_graph_execution_history(context.get_session())) - for i in range(min(self.count, len(history))): - entry_id = history[-1 - i] - entry = context.get_session().graph.get_node(entry_id) - logger.info(f"{entry_id}: {get_invocation_command(entry)}") - - -class SetDefaultCommand(BaseCommand): - """Sets a default value for a field""" - - type: Literal["default"] = "default" - - # Inputs - # fmt: off - field: str = Field(description="The field to set the default for") - value: str = Field(description="The value to set the default to, or None to clear the default") - # fmt: on - - def run(self, context: CliContext) -> None: - if self.value is None: - if self.field in context.defaults: - del context.defaults[self.field] - else: - context.defaults[self.field] = self.value - - -class DrawGraphCommand(BaseCommand): - """Debugs a graph""" - - type: Literal["draw_graph"] = "draw_graph" - - def run(self, context: CliContext) -> None: - session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id) - nxgraph = session.graph.nx_graph_flat() - - # Draw the networkx graph - plt.figure(figsize=(20, 20)) - pos = nx.spectral_layout(nxgraph) - nx.draw_networkx_nodes(nxgraph, pos, node_size=1000) - nx.draw_networkx_edges(nxgraph, pos, width=2) - nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif") - plt.axis("off") - plt.show() - - -class DrawExecutionGraphCommand(BaseCommand): - """Debugs an execution graph""" - - type: Literal["draw_xgraph"] = "draw_xgraph" - - def run(self, context: CliContext) -> None: - session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id) - nxgraph = session.execution_graph.nx_graph_flat() - - # Draw the networkx graph - plt.figure(figsize=(20, 20)) - pos = nx.spectral_layout(nxgraph) - nx.draw_networkx_nodes(nxgraph, pos, node_size=1000) - nx.draw_networkx_edges(nxgraph, pos, 
width=2) - nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif") - plt.axis("off") - plt.show() - - -class SortedHelpFormatter(argparse.HelpFormatter): - def _iter_indented_subactions(self, action): - try: - get_subactions = action._get_subactions - except AttributeError: - pass - else: - self._indent() - if isinstance(action, argparse._SubParsersAction): - for subaction in sorted(get_subactions(), key=lambda x: x.dest): - yield subaction - else: - for subaction in get_subactions(): - yield subaction - self._dedent() diff --git a/invokeai/app/cli/completer.py b/invokeai/app/cli/completer.py deleted file mode 100644 index 5aece8a058..0000000000 --- a/invokeai/app/cli/completer.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -Readline helper functions for cli_app.py -You may import the global singleton `completer` to get access to the -completer object. -""" -import atexit -import readline -import shlex -from pathlib import Path -from typing import Dict, List, Literal, get_args, get_origin, get_type_hints - -import invokeai.backend.util.logging as logger - -from ...backend import ModelManager -from ..invocations.baseinvocation import BaseInvocation -from ..services.invocation_services import InvocationServices -from .commands import BaseCommand - -# singleton object, class variable -completer = None - - -class Completer(object): - def __init__(self, model_manager: ModelManager): - self.commands = self.get_commands() - self.matches = None - self.linebuffer = None - self.manager = model_manager - return - - def complete(self, text, state): - """ - Complete commands and switches fromm the node CLI command line. - Switches are determined in a context-specific manner. 
- """ - - buffer = readline.get_line_buffer() - if state == 0: - options = None - try: - current_command, current_switch = self.get_current_command(buffer) - options = self.get_command_options(current_command, current_switch) - except IndexError: - pass - options = options or list(self.parse_commands().keys()) - - if not text: # first time - self.matches = options - else: - self.matches = [s for s in options if s and s.startswith(text)] - - try: - match = self.matches[state] - except IndexError: - match = None - return match - - @classmethod - def get_commands(self) -> List[object]: - """ - Return a list of all the client commands and invocations. - """ - return BaseCommand.get_commands() + BaseInvocation.get_invocations() - - def get_current_command(self, buffer: str) -> tuple[str, str]: - """ - Parse the readline buffer to find the most recent command and its switch. - """ - if len(buffer) == 0: - return None, None - tokens = shlex.split(buffer) - command = None - switch = None - for t in tokens: - if t[0].isalpha(): - if switch is None: - command = t - else: - switch = t - # don't try to autocomplete switches that are already complete - if switch and buffer.endswith(" "): - switch = None - return command or "", switch or "" - - def parse_commands(self) -> Dict[str, List[str]]: - """ - Return a dict in which the keys are the command name - and the values are the parameters the command takes. - """ - result = dict() - for command in self.commands: - hints = get_type_hints(command) - name = get_args(hints["type"])[0] - result.update({name: hints}) - return result - - def get_command_options(self, command: str, switch: str) -> List[str]: - """ - Return all the parameters that can be passed to the command as - command-line switches. Returns None if the command is unrecognized. 
- """ - parsed_commands = self.parse_commands() - if command not in parsed_commands: - return None - - # handle switches in the format "-foo=bar" - argument = None - if switch and "=" in switch: - switch, argument = switch.split("=") - - parameter = switch.strip("-") - if parameter in parsed_commands[command]: - if argument is None: - return self.get_parameter_options(parameter, parsed_commands[command][parameter]) - else: - return [ - f"--{parameter}={x}" - for x in self.get_parameter_options(parameter, parsed_commands[command][parameter]) - ] - else: - return [f"--{x}" for x in parsed_commands[command].keys()] - - def get_parameter_options(self, parameter: str, typehint) -> List[str]: - """ - Given a parameter type (such as Literal), offers autocompletions. - """ - if get_origin(typehint) == Literal: - return get_args(typehint) - if parameter == "model": - return self.manager.model_names() - - def _pre_input_hook(self): - if self.linebuffer: - readline.insert_text(self.linebuffer) - readline.redisplay() - self.linebuffer = None - - -def set_autocompleter(services: InvocationServices) -> Completer: - global completer - - if completer: - return completer - - completer = Completer(services.model_manager) - - readline.set_completer(completer.complete) - try: - readline.set_auto_history(True) - except AttributeError: - # pyreadline3 does not have a set_auto_history() method - pass - readline.set_pre_input_hook(completer._pre_input_hook) - readline.set_completer_delims(" ") - readline.parse_and_bind("tab: complete") - readline.parse_and_bind("set print-completions-horizontally off") - readline.parse_and_bind("set page-completions on") - readline.parse_and_bind("set skip-completed-text on") - readline.parse_and_bind("set show-all-if-ambiguous on") - - histfile = Path(services.configuration.root_dir / ".invoke_history") - try: - readline.read_history_file(histfile) - readline.set_history_length(1000) - except FileNotFoundError: - pass - except OSError: # file likely 
corrupted - newname = f"{histfile}.old" - logger.error(f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}") - histfile.replace(Path(newname)) - atexit.register(readline.write_history_file, histfile) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py deleted file mode 100644 index 2f8a4d2cbd..0000000000 --- a/invokeai/app/cli_app.py +++ /dev/null @@ -1,484 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team - -from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache - -from .services.config import InvokeAIAppConfig - -# parse_args() must be called before any other imports. if it is not called first, consumers of the config -# which are imported/used before parse_args() is called will get the default config values instead of the -# values from the command line or config file. - -if True: # hack to make flake8 happy with imports coming after setting up the config - import argparse - import re - import shlex - import sqlite3 - import sys - import time - from typing import Optional, Union, get_type_hints - - import torch - from pydantic import BaseModel, ValidationError - from pydantic.fields import Field - - import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) - from invokeai.app.services.board_image_record_storage import SqliteBoardImageRecordStorage - from invokeai.app.services.board_images import BoardImagesService, BoardImagesServiceDependencies - from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage - from invokeai.app.services.boards import BoardService, BoardServiceDependencies - from invokeai.app.services.image_record_storage import SqliteImageRecordStorage - from invokeai.app.services.images import ImageService, ImageServiceDependencies - from invokeai.app.services.invocation_stats import InvocationStatsService - from invokeai.app.services.resource_name import 
SimpleNameService - from invokeai.app.services.urls import LocalUrlService - from invokeai.backend.util.logging import InvokeAILogger - from invokeai.version.invokeai_version import __version__ - - from .cli.commands import BaseCommand, CliContext, ExitCli, SortedHelpFormatter, add_graph_parsers, add_parsers - from .cli.completer import set_autocompleter - from .invocations.baseinvocation import BaseInvocation - from .services.default_graphs import create_system_graphs, default_text_to_image_graph_id - from .services.events import EventServiceBase - from .services.graph import ( - Edge, - EdgeConnection, - GraphExecutionState, - GraphInvocation, - LibraryGraph, - are_connection_types_compatible, - ) - from .services.image_file_storage import DiskImageFileStorage - from .services.invocation_queue import MemoryInvocationQueue - from .services.invocation_services import InvocationServices - from .services.invoker import Invoker - from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage - from .services.model_manager_service import ModelManagerService - from .services.processor import DefaultInvocationProcessor - from .services.sqlite import SqliteItemStorage - - if torch.backends.mps.is_available(): - import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import) - -config = InvokeAIAppConfig.get_config() -config.parse_args() -logger = InvokeAILogger().get_logger(config=config) - - -class CliCommand(BaseModel): - command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore - - -class InvalidArgs(Exception): - pass - - -def add_invocation_args(command_parser): - # Add linking capability - command_parser.add_argument( - "--link", - "-l", - action="append", - nargs=3, - help="A link in the format 'source_node source_field dest_field'. source_node can be relative to history (e.g. 
-1)", - ) - - command_parser.add_argument( - "--link_node", - "-ln", - action="append", - help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)", - ) - - -def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser: - # Create invocation parser - parser = argparse.ArgumentParser(formatter_class=SortedHelpFormatter) - - def exit(*args, **kwargs): - raise InvalidArgs - - parser.exit = exit - subparsers = parser.add_subparsers(dest="type") - - # Create subparsers for each invocation - invocations = BaseInvocation.get_all_subclasses() - add_parsers(subparsers, invocations, add_arguments=add_invocation_args) - - # Create subparsers for each command - commands = BaseCommand.get_all_subclasses() - add_parsers(subparsers, commands, exclude_fields=["type"]) - - # Create subparsers for exposed CLI graphs - # TODO: add a way to identify these graphs - text_to_image = services.graph_library.get(default_text_to_image_graph_id) - add_graph_parsers(subparsers, [text_to_image], add_arguments=add_invocation_args) - - return parser - - -class NodeField: - alias: str - node_path: str - field: str - field_type: type - - def __init__(self, alias: str, node_path: str, field: str, field_type: type): - self.alias = alias - self.node_path = node_path - self.field = field - self.field_type = field_type - - -def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str, NodeField]: - return {k: NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()} - - -def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField: - """Gets the node field for the specified field alias""" - exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias) - node_type = type(graph.graph.get_node(exposed_input.node_path)) - return NodeField( - alias=exposed_input.alias, - node_path=f"{node_id}.{exposed_input.node_path}", - field=exposed_input.field, - 
field_type=get_type_hints(node_type)[exposed_input.field], - ) - - -def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField: - """Gets the node field for the specified field alias""" - exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias) - node_type = type(graph.graph.get_node(exposed_output.node_path)) - node_output_type = node_type.get_output_type() - return NodeField( - alias=exposed_output.alias, - node_path=f"{node_id}.{exposed_output.node_path}", - field=exposed_output.field, - field_type=get_type_hints(node_output_type)[exposed_output.field], - ) - - -def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]: - """Gets the inputs for the specified invocation from the context""" - node_type = type(invocation) - if node_type is not GraphInvocation: - return fields_from_type_hints(get_type_hints(node_type), invocation.id) - else: - graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id]) - return {e.alias: get_node_input_field(graph, e.alias, invocation.id) for e in graph.exposed_inputs} - - -def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]: - """Gets the outputs for the specified invocation from the context""" - node_type = type(invocation) - if node_type is not GraphInvocation: - return fields_from_type_hints(get_type_hints(node_type.get_output_type()), invocation.id) - else: - graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id]) - return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs} - - -def generate_matching_edges(a: BaseInvocation, b: BaseInvocation, context: CliContext) -> list[Edge]: - """Generates all possible edges between two invocations""" - afields = get_node_outputs(a, context) - bfields = get_node_inputs(b, context) - - matching_fields = 
set(afields.keys()).intersection(bfields.keys()) - - # Remove invalid fields - invalid_fields = set(["type", "id"]) - matching_fields = matching_fields.difference(invalid_fields) - - # Validate types - matching_fields = [ - f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type) - ] - - edges = [ - Edge( - source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field), - destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field), - ) - for alias in matching_fields - ] - return edges - - -class SessionError(Exception): - """Raised when a session error has occurred""" - - pass - - -def invoke_all(context: CliContext): - """Runs all invocations in the specified session""" - context.invoker.invoke(context.session, invoke_all=True) - while not context.get_session().is_complete(): - # Wait some time - time.sleep(0.1) - - # Print any errors - if context.session.has_error(): - for n in context.session.errors: - context.invoker.services.logger.error( - f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}" - ) - - raise SessionError() - - -def invoke_cli(): - logger.info(f"InvokeAI version {__version__}") - # get the optional list of invocations to execute on the command line - parser = config.get_parser() - parser.add_argument("commands", nargs="*") - invocation_commands = parser.parse_args().commands - - # get the optional file to read commands from. - # Simplest is to use it for STDIN - if infile := config.from_file: - sys.stdin = open(infile, "r") - - model_manager = ModelManagerService(config, logger) - - events = EventServiceBase() - output_folder = config.output_path - - # TODO: build a file/path manager? 
- if config.use_memory_db: - db_location = ":memory:" - else: - db_location = config.db_path - db_location.parent.mkdir(parents=True, exist_ok=True) - - db_conn = sqlite3.connect(db_location, check_same_thread=False) # TODO: figure out a better threading solution - logger.info(f'InvokeAI database location is "{db_location}"') - - graph_execution_manager = SqliteItemStorage[GraphExecutionState](conn=db_conn, table_name="graph_executions") - - urls = LocalUrlService() - image_record_storage = SqliteImageRecordStorage(conn=db_conn) - image_file_storage = DiskImageFileStorage(f"{output_folder}/images") - names = SimpleNameService() - - board_record_storage = SqliteBoardRecordStorage(conn=db_conn) - board_image_record_storage = SqliteBoardImageRecordStorage(conn=db_conn) - - boards = BoardService( - services=BoardServiceDependencies( - board_image_record_storage=board_image_record_storage, - board_record_storage=board_record_storage, - image_record_storage=image_record_storage, - url=urls, - logger=logger, - ) - ) - - board_images = BoardImagesService( - services=BoardImagesServiceDependencies( - board_image_record_storage=board_image_record_storage, - board_record_storage=board_record_storage, - image_record_storage=image_record_storage, - url=urls, - logger=logger, - ) - ) - - images = ImageService( - services=ImageServiceDependencies( - board_image_record_storage=board_image_record_storage, - image_record_storage=image_record_storage, - image_file_storage=image_file_storage, - url=urls, - logger=logger, - names=names, - graph_execution_manager=graph_execution_manager, - ) - ) - - services = InvocationServices( - model_manager=model_manager, - events=events, - latents=ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents")), - images=images, - boards=boards, - board_images=board_images, - queue=MemoryInvocationQueue(), - graph_library=SqliteItemStorage[LibraryGraph](conn=db_conn, table_name="graphs"), - 
graph_execution_manager=graph_execution_manager, - processor=DefaultInvocationProcessor(), - performance_statistics=InvocationStatsService(graph_execution_manager), - logger=logger, - configuration=config, - invocation_cache=MemoryInvocationCache(max_cache_size=config.node_cache_size), - ) - - system_graphs = create_system_graphs(services.graph_library) - system_graph_names = set([g.name for g in system_graphs]) - set_autocompleter(services) - - invoker = Invoker(services) - session: GraphExecutionState = invoker.create_execution_state() - parser = get_command_parser(services) - - re_negid = re.compile("^-[0-9]+$") - - # Uncomment to print out previous sessions at startup - # print(services.session_manager.list()) - - context = CliContext(invoker, session, parser) - set_autocompleter(services) - - command_line_args_exist = len(invocation_commands) > 0 - done = False - - while not done: - try: - if command_line_args_exist: - cmd_input = invocation_commands.pop(0) - done = len(invocation_commands) == 0 - else: - cmd_input = input("invoke> ") - except (KeyboardInterrupt, EOFError): - # Ctrl-c exits - break - - try: - # Refresh the state of the session - # history = list(get_graph_execution_history(context.session)) - history = list(reversed(context.nodes_added)) - - # Split the command for piping - cmds = cmd_input.split("|") - start_id = len(context.nodes_added) - current_id = start_id - new_invocations = list() - for cmd in cmds: - if cmd is None or cmd.strip() == "": - raise InvalidArgs("Empty command") - - # Parse args to create invocation - args = vars(context.parser.parse_args(shlex.split(cmd.strip()))) - - # Override defaults - for field_name, field_default in context.defaults.items(): - if field_name in args: - args[field_name] = field_default - - # Parse invocation - command: CliCommand = None # type:ignore - system_graph: Optional[LibraryGraph] = None - if args["type"] in system_graph_names: - system_graph = next(filter(lambda g: g.name == args["type"], 
system_graphs)) - invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id)) - for exposed_input in system_graph.exposed_inputs: - if exposed_input.alias in args: - node = invocation.graph.get_node(exposed_input.node_path) - field = exposed_input.field - setattr(node, field, args[exposed_input.alias]) - command = CliCommand(command=invocation) - context.graph_nodes[invocation.id] = system_graph.id - else: - args["id"] = current_id - command = CliCommand(command=args) - - if command is None: - continue - - # Run any CLI commands immediately - if isinstance(command.command, BaseCommand): - # Invoke all current nodes to preserve operation order - invoke_all(context) - - # Run the command - command.command.run(context) - continue - - # TODO: handle linking with library graphs - # Pipe previous command output (if there was a previous command) - edges: list[Edge] = list() - if len(history) > 0 or current_id != start_id: - from_id = history[0] if current_id == start_id else str(current_id - 1) - from_node = ( - next(filter(lambda n: n[0].id == from_id, new_invocations))[0] - if current_id != start_id - else context.session.graph.get_node(from_id) - ) - matching_edges = generate_matching_edges(from_node, command.command, context) - edges.extend(matching_edges) - - # Parse provided links - if "link_node" in args and args["link_node"]: - for link in args["link_node"]: - node_id = link - if re_negid.match(node_id): - node_id = str(current_id + int(node_id)) - - link_node = context.session.graph.get_node(node_id) - matching_edges = generate_matching_edges(link_node, command.command, context) - matching_destinations = [e.destination for e in matching_edges] - edges = [e for e in edges if e.destination not in matching_destinations] - edges.extend(matching_edges) - - if "link" in args and args["link"]: - for link in args["link"]: - edges = [ - e - for e in edges - if e.destination.node_id != command.command.id or e.destination.field != link[2] - ] - - node_id = 
link[0] - if re_negid.match(node_id): - node_id = str(current_id + int(node_id)) - - # TODO: handle missing input/output - node_output = get_node_outputs(context.session.graph.get_node(node_id), context)[link[1]] - node_input = get_node_inputs(command.command, context)[link[2]] - - edges.append( - Edge( - source=EdgeConnection(node_id=node_output.node_path, field=node_output.field), - destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field), - ) - ) - - new_invocations.append((command.command, edges)) - - current_id = current_id + 1 - - # Add the node to the session - context.add_node(command.command) - for edge in edges: - print(edge) - context.add_edge(edge) - - # Execute all remaining nodes - invoke_all(context) - - except InvalidArgs: - invoker.services.logger.warning('Invalid command, use "help" to list commands') - continue - - except ValidationError: - invoker.services.logger.warning('Invalid command arguments, run " --help" for summary') - - except SessionError: - # Start a new session - invoker.services.logger.warning("Session error: creating a new session") - context.reset() - - except ExitCli: - break - - except SystemExit: - continue - - invoker.stop() - - -if __name__ == "__main__": - if config.version: - print(f"InvokeAI version {__version__}") - else: - invoke_cli() diff --git a/mkdocs.yml b/mkdocs.yml index f95d83ac8f..97b2a16f19 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -134,6 +134,7 @@ nav: - List of Default Nodes: 'nodes/defaultNodes.md' - Workflow Editor Usage: 'nodes/NODES.md' - ComfyUI to InvokeAI: 'nodes/comfyToInvoke.md' + - Facetool Node: 'nodes/detailedNodes/faceTools.md' - Contributing Nodes: 'nodes/contributingNodes.md' - Features: - Overview: 'features/index.md' @@ -144,7 +145,7 @@ nav: - Image-to-Image: 'features/IMG2IMG.md' - Controlling Logging: 'features/LOGGING.md' - Model Merging: 'features/MODEL_MERGING.md' - - Using Nodes : './nodes/overview' + - Using Nodes : 'nodes/overview.md' - NSFW Checker: 
'features/WATERMARK+NSFW.md' - Postprocessing: 'features/POSTPROCESS.md' - Prompting Features: 'features/PROMPTS.md' @@ -152,15 +153,18 @@ nav: - Unified Canvas: 'features/UNIFIED_CANVAS.md' - InvokeAI Web Server: 'features/WEB.md' - WebUI Hotkeys: "features/WEBUIHOTKEYS.md" + - Maintenance Utilities: "features/UTILITIES.md" - Other: 'features/OTHER.md' - Contributing: - How to Contribute: 'contributing/CONTRIBUTING.md' + - InvokeAI Code of Conduct: 'CODE_OF_CONDUCT.md' - Development: - Overview: 'contributing/contribution_guides/development.md' - New Contributors: 'contributing/contribution_guides/newContributorChecklist.md' - InvokeAI Architecture: 'contributing/ARCHITECTURE.md' - Frontend Documentation: 'contributing/contribution_guides/contributingToFrontend.md' - Local Development: 'contributing/LOCAL_DEVELOPMENT.md' + - Adding Tests: 'contributing/TESTS.md' - Documentation: 'contributing/contribution_guides/documentation.md' - Nodes: 'contributing/INVOCATIONS.md' - Translation: 'contributing/contribution_guides/translation.md' @@ -168,9 +172,12 @@ nav: - Changelog: 'CHANGELOG.md' - Deprecated: - Command Line Interface: 'deprecated/CLI.md' + - Variations: 'deprecated/VARIATIONS.md' + - Translations: 'deprecated/TRANSLATION.md' - Embiggen: 'deprecated/EMBIGGEN.md' - Inpainting: 'deprecated/INPAINTING.md' - Outpainting: 'deprecated/OUTPAINTING.md' + - Troubleshooting: 'help/deprecated/TROUBLESHOOT.md' - Help: - Getting Started: 'help/gettingStartedWithAI.md' - Diffusion Overview: 'help/diffusion.md' diff --git a/pyproject.toml b/pyproject.toml index 67486e1120..96c6c3dd73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,7 +125,7 @@ dependencies = [ # shortcut commands to start cli and web # "invokeai --web" will launch the web interface # "invokeai" will launch the CLI -"invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" +# "invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" # new shortcut to launch web interface "invokeai-web" = 
"invokeai.app.api_app:invoke_api" @@ -138,7 +138,6 @@ dependencies = [ "invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main" "invokeai-update" = "invokeai.frontend.install.invokeai_update:main" "invokeai-metadata" = "invokeai.backend.image_util.invoke_metadata:main" -"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli" "invokeai-node-web" = "invokeai.app.api_app:invoke_api" "invokeai-import-images" = "invokeai.frontend.install.import_images:main" "invokeai-db-maintenance" = "invokeai.backend.util.db_maintenance:main" From 67a343b3e45bfafec0b5deb62f0117610f3acab6 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 11:20:06 +1100 Subject: [PATCH 23/24] Update pyproject.toml --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 96c6c3dd73..2bcaea2efa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,9 +122,8 @@ dependencies = [ "configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure" "textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion" -# shortcut commands to start cli and web +# shortcut commands to start web ui # "invokeai --web" will launch the web interface -# "invokeai" will launch the CLI # "invokeai" = "invokeai.frontend.legacy_launch_invokeai:main" # new shortcut to launch web interface From 024aa5eb90a72eb99125895bba2397a4f23886bd Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:19:06 +1100 Subject: [PATCH 24/24] fix(ui): fix field sorting closes #4934 --- .../hooks/useAnyOrDirectInputFieldNames.ts | 19 +++++++--------- .../hooks/useConnectionInputFieldNames.ts | 22 +++++++++---------- .../nodes/hooks/useOutputFieldNames.ts | 8 +++---- .../nodes/util/getSortedFilteredFieldNames.ts | 20 +++++++++++++++++ 4 files changed, 42 insertions(+), 27 deletions(-) create 
mode 100644 invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts index 36f2e8a62c..dda2efc156 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useAnyOrDirectInputFieldNames.ts @@ -9,6 +9,7 @@ import { POLYMORPHIC_TYPES, TYPES_WITH_INPUT_COMPONENTS, } from '../types/constants'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useAnyOrDirectInputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -24,17 +25,13 @@ export const useAnyOrDirectInputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.inputs) - .filter( - (field) => - (['any', 'direct'].includes(field.input) || - POLYMORPHIC_TYPES.includes(field.type)) && - TYPES_WITH_INPUT_COMPONENTS.includes(field.type) - ) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 
0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + const fields = map(nodeTemplate.inputs).filter( + (field) => + (['any', 'direct'].includes(field.input) || + POLYMORPHIC_TYPES.includes(field.type)) && + TYPES_WITH_INPUT_COMPONENTS.includes(field.type) + ); + return getSortedFilteredFieldNames(fields); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts index eea874cc87..9fb31df801 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useConnectionInputFieldNames.ts @@ -9,6 +9,7 @@ import { TYPES_WITH_INPUT_COMPONENTS, } from '../types/constants'; import { isInvocationNode } from '../types/types'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useConnectionInputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -24,17 +25,16 @@ export const useConnectionInputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.inputs) - .filter( - (field) => - (field.input === 'connection' && - !POLYMORPHIC_TYPES.includes(field.type)) || - !TYPES_WITH_INPUT_COMPONENTS.includes(field.type) - ) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 
0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + + // get the visible fields + const fields = map(nodeTemplate.inputs).filter( + (field) => + (field.input === 'connection' && + !POLYMORPHIC_TYPES.includes(field.type)) || + !TYPES_WITH_INPUT_COMPONENTS.includes(field.type) + ); + + return getSortedFilteredFieldNames(fields); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts b/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts index 97956a4889..e0a1e5433e 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useOutputFieldNames.ts @@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import { map } from 'lodash-es'; import { useMemo } from 'react'; import { isInvocationNode } from '../types/types'; +import { getSortedFilteredFieldNames } from '../util/getSortedFilteredFieldNames'; export const useOutputFieldNames = (nodeId: string) => { const selector = useMemo( @@ -20,11 +21,8 @@ export const useOutputFieldNames = (nodeId: string) => { if (!nodeTemplate) { return []; } - return map(nodeTemplate.outputs) - .filter((field) => !field.ui_hidden) - .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 
0)) - .map((field) => field.name) - .filter((fieldName) => fieldName !== 'is_intermediate'); + + return getSortedFilteredFieldNames(map(nodeTemplate.outputs)); }, defaultSelectorOptions ), diff --git a/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts b/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts new file mode 100644 index 0000000000..b235fe8a07 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/getSortedFilteredFieldNames.ts @@ -0,0 +1,20 @@ +import { isNil } from 'lodash-es'; +import { InputFieldTemplate, OutputFieldTemplate } from '../types/types'; + +export const getSortedFilteredFieldNames = ( + fields: InputFieldTemplate[] | OutputFieldTemplate[] +): string[] => { + const visibleFields = fields.filter((field) => !field.ui_hidden); + + // we want explicitly ordered fields to be before unordered fields; split the list + const orderedFields = visibleFields + .filter((f) => !isNil(f.ui_order)) + .sort((a, b) => (a.ui_order ?? 0) - (b.ui_order ?? 0)); + const unorderedFields = visibleFields.filter((f) => isNil(f.ui_order)); + + // concat the lists, and return the field names, skipping `is_intermediate` + return orderedFields + .concat(unorderedFields) + .map((f) => f.name) + .filter((fieldName) => fieldName !== 'is_intermediate'); +};