From 5aa7bfebd4de0f3b2105941449b3b6c5f8eeb8af Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Wed, 16 Aug 2023 20:28:33 +0300 Subject: [PATCH 01/20] Fix masked generation with inpaint models --- .../stable_diffusion/diffusers_pipeline.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index aa809bc3ec..3796bbbec7 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -360,28 +360,30 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): latents = self.scheduler.add_noise(latents, noise, batched_t) if mask is not None: + # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) + if noise is None: + noise = torch.randn( + orig_latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(seed or 0), + ).to(device=orig_latents.device, dtype=orig_latents.dtype) + + latents = self.scheduler.add_noise(latents, noise, batched_t) + latents = torch.lerp( + orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) + ) + if is_inpainting_model(self.unet): # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint # (that's why there's a mask!) but it seems to really want that blanked out. - # masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill + masked_latents = orig_latents * torch.where(mask < 0.5, 1, 0) # TODO: we should probably pass this in so we don't have to try/finally around setting it. - self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents) + self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( + self._unet_forward, mask, masked_latents + ) else: - # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) - if noise is None: - noise = torch.randn( - orig_latents.shape, - dtype=torch.float32, - device="cpu", - generator=torch.Generator(device="cpu").manual_seed(seed or 0), - ).to(device=orig_latents.device, dtype=orig_latents.dtype) - - latents = self.scheduler.add_noise(latents, noise, batched_t) - latents = torch.lerp( - orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) - ) - additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) try: From bf0dfcac2fb1b820b441fd0819ffe9371bc6b58e Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Thu, 17 Aug 2023 19:19:07 +0300 Subject: [PATCH 02/20] Add inapint mask field class --- invokeai/app/invocations/primitives.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 61ef9fa27c..7444e826e9 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -305,6 +305,18 @@ class ImageCollectionInvocation(BaseInvocation): return ImageCollectionOutput(collection=self.collection) +# endregion + +# region InpaintMask + + +class InpaintMaskField(BaseModel): + """An inapint mask field""" + + mask_name: str = Field(description="The name of the mask image") + masked_latens_name: Optional[str] = Field(description="The name of the masked image latents") + + # endregion # region Latents From ff5c72558605a70ff59b13075aa02e6147e7552d Mon Sep 17 00:00:00 2001 From: Sergey 
Borisov Date: Thu, 17 Aug 2023 19:35:03 +0300 Subject: [PATCH 03/20] Update mask field type --- invokeai/app/invocations/latent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 40f7af8703..e75637bd58 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -24,6 +24,7 @@ from invokeai.app.invocations.primitives import ( ImageOutput, LatentsField, LatentsOutput, + InpaintMaskField, build_latents_output, ) from invokeai.app.util.controlnet_utils import prepare_control_image @@ -126,7 +127,7 @@ class DenoiseLatentsInvocation(BaseInvocation): default=None, description=FieldDescriptions.control, input=Input.Connection ) latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) - mask: Optional[ImageField] = InputField( + mask: Optional[InpaintMaskField] = InputField( default=None, description=FieldDescriptions.mask, ) From b2133353167d6c4f20dd5d1a025ec850e067256f Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Fri, 18 Aug 2023 04:54:23 +1200 Subject: [PATCH 04/20] feat: Add InpaintMask Field type --- invokeai/app/invocations/primitives.py | 6 +-- .../components/fields/InputFieldRenderer.tsx | 14 +++++++ .../fieldTypes/InpaintMaskInputField.tsx | 17 +++++++++ .../web/src/features/nodes/types/constants.ts | 5 +++ .../web/src/features/nodes/types/types.ts | 13 +++++++ .../nodes/util/fieldTemplateBuilders.ts | 20 ++++++++++ .../features/nodes/util/fieldValueBuilders.ts | 4 ++ .../frontend/web/src/services/api/schema.d.ts | 38 +++++++++++++------ 8 files changed, 103 insertions(+), 14 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/components/fields/fieldTypes/InpaintMaskInputField.tsx diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 7444e826e9..63738b349f 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -1,10 +1,10 @@ # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) from typing import Literal, Optional, Tuple, Union -from anyio import Condition -from pydantic import BaseModel, Field import torch +from anyio import Condition +from pydantic import BaseModel, Field from .baseinvocation import ( BaseInvocation, @@ -311,7 +311,7 @@ class ImageCollectionInvocation(BaseInvocation): class InpaintMaskField(BaseModel): - """An inapint mask field""" + """An inpaint mask field""" mask_name: str = Field(description="The name of the mask image") masked_latens_name: Optional[str] = Field(description="The name of the masked image latents") diff --git a/invokeai/frontend/web/src/features/nodes/components/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/fields/InputFieldRenderer.tsx index acec921d8e..0ebddfc2fb 100644 --- a/invokeai/frontend/web/src/features/nodes/components/fields/InputFieldRenderer.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/fields/InputFieldRenderer.tsx @@ -15,6 +15,7 @@ import ControlNetModelInputField from './fieldTypes/ControlNetModelInputField'; import EnumInputField from './fieldTypes/EnumInputField'; import ImageCollectionInputField from './fieldTypes/ImageCollectionInputField'; import ImageInputField from './fieldTypes/ImageInputField'; +import InpaintMaskInputField from './fieldTypes/InpaintMaskInputField'; import LatentsInputField from 
'./fieldTypes/LatentsInputField'; import LoRAModelInputField from './fieldTypes/LoRAModelInputField'; import MainModelInputField from './fieldTypes/MainModelInputField'; @@ -93,6 +94,19 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { ); } + if ( + field?.type === 'InpaintMaskField' && + fieldTemplate?.type === 'InpaintMaskField' + ) { + return ( + + ); + } + if ( field?.type === 'LatentsField' && fieldTemplate?.type === 'LatentsField' diff --git a/invokeai/frontend/web/src/features/nodes/components/fields/fieldTypes/InpaintMaskInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/fields/fieldTypes/InpaintMaskInputField.tsx new file mode 100644 index 0000000000..cf786db3a5 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/components/fields/fieldTypes/InpaintMaskInputField.tsx @@ -0,0 +1,17 @@ +import { + InpaintMaskFieldValue, + InpaintMaskInputFieldTemplate, +} from 'features/nodes/types/types'; +import { memo } from 'react'; +import { FieldComponentProps } from './types'; + +const InpaintMaskInputFieldComponent = ( + _props: FieldComponentProps< + InpaintMaskFieldValue, + InpaintMaskInputFieldTemplate + > +) => { + return null; +}; + +export default memo(InpaintMaskInputFieldComponent); diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 1c5c89ff2d..a6b4293bb5 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -57,6 +57,11 @@ export const FIELDS: Record = { description: 'Images may be passed between nodes.', color: 'purple.500', }, + InpaintMaskField: { + title: 'Inpaint Mask', + description: 'Inpaint Mask may be passed between nodes', + color: 'purple.500', + }, LatentsField: { title: 'Latents', description: 'Latents may be passed between nodes.', diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 60e4877fd8..85152ac2d9 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -63,6 +63,7 @@ export const zFieldType = z.enum([ 'string', 'array', 'ImageField', + 'InpaintMaskField', 'LatentsField', 'ConditioningField', 'ControlField', @@ -121,6 +122,7 @@ export type InputFieldValue = | StringInputFieldValue | BooleanInputFieldValue | ImageInputFieldValue + | InpaintMaskFieldValue | LatentsInputFieldValue | ConditioningInputFieldValue | UNetInputFieldValue @@ -151,6 +153,7 @@ export type InputFieldTemplate = | StringInputFieldTemplate | BooleanInputFieldTemplate | ImageInputFieldTemplate + | InpaintMaskInputFieldTemplate | LatentsInputFieldTemplate | ConditioningInputFieldTemplate | UNetInputFieldTemplate @@ -277,6 +280,11 @@ export type ImageCollectionInputFieldValue = InputFieldValueBase & { value?: ImageField[]; }; +export type InpaintMaskFieldValue = InputFieldValueBase & { + type: 'InpaintMaskField'; + value?: undefined; +}; + export type MainModelInputFieldValue = InputFieldValueBase & { type: 'MainModelField'; value?: MainModelParam | OnnxModelParam; @@ -374,6 +382,11 @@ export type ImageCollectionInputFieldTemplate = InputFieldTemplateBase & { type: 'ImageCollection'; }; +export type InpaintMaskInputFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'InpaintMaskField'; +}; + export type LatentsInputFieldTemplate = InputFieldTemplateBase & { default: string; type: 
'LatentsField'; diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index 37aaab59b6..653a1d44b0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -17,6 +17,7 @@ import { FloatInputFieldTemplate, ImageCollectionInputFieldTemplate, ImageInputFieldTemplate, + InpaintMaskInputFieldTemplate, InputFieldTemplateBase, IntegerInputFieldTemplate, InvocationFieldSchema, @@ -268,6 +269,19 @@ const buildImageCollectionInputFieldTemplate = ({ return template; }; +const buildInpaintMaskInputFieldTemplate = ({ + schemaObject, + baseField, +}: BuildInputFieldArg): InpaintMaskInputFieldTemplate => { + const template: InpaintMaskInputFieldTemplate = { + ...baseField, + type: 'InpaintMaskField', + default: schemaObject.default ?? undefined, + }; + + return template; +}; + const buildLatentsInputFieldTemplate = ({ schemaObject, baseField, @@ -489,6 +503,12 @@ export const buildInputFieldTemplate = ( baseField, }); } + if (fieldType === 'InpaintMaskField') { + return buildInpaintMaskInputFieldTemplate({ + schemaObject: fieldSchema, + baseField, + }); + } if (fieldType === 'LatentsField') { return buildLatentsInputFieldTemplate({ schemaObject: fieldSchema, diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts index 473dc83bb6..5b6f9c53de 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts @@ -49,6 +49,10 @@ export const buildInputFieldValue = ( fieldValue.value = []; } + if (template.type === 'InpaintMaskField') { + fieldValue.value = undefined; + } + if (template.type === 'LatentsField') { fieldValue.value = undefined; } diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 316ee0c085..0ee19bf893 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1616,7 +1616,7 @@ export type components = { * Mask * @description The mask to use for the operation */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["InpaintMaskField"]; /** * Positive Conditioning * @description Positive conditioning tensor @@ -1914,7 +1914,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | 
components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | 
components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; + [key: string]: (components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | 
components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; }; /** * Edges @@ -1957,7 +1957,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: (components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; + [key: string]: (components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] 
| components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; }; /** * Errors @@ -3112,6 +3112,22 @@ export type components = { */ seed?: number; }; + /** + * InpaintMaskField + * @description An inpaint mask field + */ + InpaintMaskField: { + /** + * Mask Name + * @description The name of the mask image + */ + mask_name: string; + /** + * Masked Latens Name + * @description The name of the masked image latents + */ + masked_latens_name?: string; + }; /** * Integer Primitive Collection * @description A collection of integer primitive values @@ -6193,12 +6209,6 @@ export type components = { ui_hidden: boolean; ui_type?: components["schemas"]["UIType"]; }; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion1ModelFormat * @description An enumeration. @@ -6223,6 +6233,12 @@ export type components = { * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; }; responses: never; parameters: never; @@ -6333,7 +6349,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] 
| components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -6370,7 +6386,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | 
components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | 
components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { From cfd827cfad834a18497bb664937c7e0bcf3bbb97 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 18 Aug 2023 04:07:40 +0300 Subject: [PATCH 05/20] Added node for creating mask inpaint --- invokeai/app/invocations/latent.py | 143 ++++++++++++++---- invokeai/app/invocations/primitives.py | 9 +- .../stable_diffusion/diffusers_pipeline.py | 7 +- 3 files changed, 122 insertions(+), 37 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index e75637bd58..9d26d0a196 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -25,6 +25,7 @@ from invokeai.app.invocations.primitives import ( LatentsField, LatentsOutput, InpaintMaskField, + InpaintMaskOutput, build_latents_output, ) from invokeai.app.util.controlnet_utils import prepare_control_image @@ -66,6 +67,76 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device()) SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))] +@title("Create inpaint mask") +@tags("mask", "inpaint") +class CreateInpaintMaskInvocation(BaseInvocation): + """Creates mask for inpaint model run.""" + + # Metadata + type: Literal["create_inpaint_mask"] = "create_inpaint_mask" + + # Inputs + image: Optional[ImageField] = InputField(default=None, description="Image which will be inpainted") + mask: ImageField = InputField(description="The mask to use when pasting") + vae: VaeField = InputField( + description=FieldDescriptions.vae, + input=Input.Connection, + ) + tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) + fp32: bool = 
InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) + + def prep_mask_tensor(self, mask_image): + if mask_image.mode != "L": + # FIXME: why do we get passed an RGB image here? We can only use single-channel. + mask_image = mask_image.convert("L") + mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + if mask_tensor.dim() == 3: + mask_tensor = mask_tensor.unsqueeze(0) + #if shape is not None: + # mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR) + return mask_tensor + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> InpaintMaskOutput: + if self.image is not None: + image = context.services.images.get_pil_image(self.image.image_name) + image = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image.dim() == 3: + image = image.unsqueeze(0) + else: + image = None + + mask = self.prep_mask_tensor( + context.services.images.get_pil_image(self.mask.image_name), + ) + + if image is not None: + vae_info = context.services.model_manager.get_model( + **self.vae.vae.dict(), + context=context, + ) + + img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR) + masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0) + # TODO: + masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone()) + + masked_latents_name = f"{context.graph_execution_state_id}__{self.id}_masked_latents" + context.services.latents.save(masked_latents_name, masked_latents) + else: + masked_latents_name = None + + mask_name = f"{context.graph_execution_state_id}__{self.id}_mask" + context.services.latents.save(mask_name, mask) + + return InpaintMaskOutput( + inpaint_mask=InpaintMaskField( + mask_name=mask_name, + masked_latents_name=masked_latents_name, + ), + ) + + def get_scheduler( context: InvocationContext, scheduler_info: ModelInfo, @@ -340,19 +411,18 @@ class DenoiseLatentsInvocation(BaseInvocation): return num_inference_steps, timesteps, init_timestep - def prep_mask_tensor(self, mask, context, lantents): - if mask is None: - return None + def prep_inpaint_mask(self, context, latents): + if self.mask is None: + return None, None - mask_image = context.services.images.get_pil_image(mask.image_name) - if mask_image.mode != "L": - # FIXME: why do we get passed an RGB image here? We can only use single-channel. 
- mask_image = mask_image.convert("L") - mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) - if mask_tensor.dim() == 3: - mask_tensor = mask_tensor.unsqueeze(0) - mask_tensor = tv_resize(mask_tensor, lantents.shape[-2:], T.InterpolationMode.BILINEAR) - return 1 - mask_tensor + mask = context.services.latents.get(self.mask.mask_name) + mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR) + if self.mask.masked_latents_name is not None: + masked_latents = context.services.latents.get(self.mask.masked_latents_name) + else: + masked_latents = None + + return 1 - mask, masked_latents @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: @@ -373,7 +443,7 @@ class DenoiseLatentsInvocation(BaseInvocation): if seed is None: seed = 0 - mask = self.prep_mask_tensor(self.mask, context, latents) + mask, masked_latents = self.prep_inpaint_mask(context, latents) # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -404,6 +474,8 @@ class DenoiseLatentsInvocation(BaseInvocation): noise = noise.to(device=unet.device, dtype=unet.dtype) if mask is not None: mask = mask.to(device=unet.device, dtype=unet.dtype) + if masked_latents is not None: + masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype) scheduler = get_scheduler( context=context, @@ -440,6 +512,7 @@ class DenoiseLatentsInvocation(BaseInvocation): noise=noise, seed=seed, mask=mask, + masked_latents=masked_latents, num_inference_steps=num_inference_steps, conditioning_data=conditioning_data, control_data=control_data, # list[ControlNetData] @@ -661,26 +734,11 @@ class ImageToLatentsInvocation(BaseInvocation): tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - @torch.no_grad() - def invoke(self, context: InvocationContext) -> LatentsOutput: - # image = context.services.images.get( - # self.image.image_type, self.image.image_name - # ) - image = context.services.images.get_pil_image(self.image.image_name) - - # vae_info = context.services.model_manager.get_model(**self.vae.vae.dict()) - vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), - context=context, - ) - - image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) - if image_tensor.dim() == 3: - image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w") - + @staticmethod + def vae_encode(vae_info, upcast, tiled, image_tensor): with vae_info as vae: orig_dtype = vae.dtype - if self.fp32: + if upcast: vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( @@ -705,7 +763,7 @@ class ImageToLatentsInvocation(BaseInvocation): vae.to(dtype=torch.float16) # latents = latents.half() - if self.tiled: + if tiled: vae.enable_tiling() else: vae.disable_tiling() @@ -719,6 +777,27 @@ class ImageToLatentsInvocation(BaseInvocation): latents = vae.config.scaling_factor * latents latents = latents.to(dtype=orig_dtype) + return latents + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> LatentsOutput: + # image = context.services.images.get( + # self.image.image_type, self.image.image_name + # ) + image = context.services.images.get_pil_image(self.image.image_name) + + # vae_info = context.services.model_manager.get_model(**self.vae.vae.dict()) + vae_info = context.services.model_manager.get_model( + 
**self.vae.vae.dict(), + context=context, + ) + + image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image_tensor.dim() == 3: + image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w") + + latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor) + name = f"{context.graph_execution_state_id}__{self.id}" latents = latents.to("cpu") context.services.latents.save(name, latents) diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 63738b349f..e9656271ac 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -314,7 +314,14 @@ class InpaintMaskField(BaseModel): """An inpaint mask field""" mask_name: str = Field(description="The name of the mask image") - masked_latens_name: Optional[str] = Field(description="The name of the masked image latents") + masked_latents_name: Optional[str] = Field(description="The name of the masked image latents") + + +class InpaintMaskOutput(BaseInvocationOutput): + """Base class for nodes that output a single image""" + + type: Literal["inpaint_mask_output"] = "inpaint_mask_output" + inpaint_mask: InpaintMaskField = OutputField(description="Mask for inpaint model run") # endregion diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 3796bbbec7..fb1ceb5b1c 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -342,6 +342,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): callback: Callable[[PipelineIntermediateState], None] = None, control_data: List[ControlNetData] = None, mask: Optional[torch.Tensor] = None, + masked_latents: Optional[torch.Tensor] = None, seed: Optional[int] = None, ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: if init_timestep.shape[0] == 0: @@ -375,11 +376,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): ) if is_inpainting_model(self.unet): - # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint - # (that's why there's a mask!) but it seems to really want that blanked out. - masked_latents = orig_latents * torch.where(mask < 0.5, 1, 0) + if masked_latents is None: + raise Exception("Source image required for inpaint mask when inpaint model used!") - # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( self._unet_forward, mask, masked_latents ) From c49851e027fa47df15ea5affa37f04d5d2a67d25 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 18 Aug 2023 16:05:39 +1000 Subject: [PATCH 06/20] chore: minor cleanup after merge & flake8 --- invokeai/app/invocations/latent.py | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index a5dbf55695..6fc1657bc3 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -22,18 +22,18 @@ from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ( ImageField, ImageOutput, - LatentsField, - LatentsOutput, InpaintMaskField, InpaintMaskOutput, + LatentsField, + LatentsOutput, build_latents_output, ) from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.model_management.models import ModelType, SilenceWarnings -from ...backend.model_management.models import BaseModelType from ...backend.model_management.lora import ModelPatcher +from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( ConditioningData, @@ -45,16 +45,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP from ...backend.util.devices import choose_precision, choose_torch_device from ..models.image import ImageCategory, ResourceOrigin -from .baseinvocation import ( - BaseInvocation, - FieldDescriptions, - Input, - InputField, - InvocationContext, - UIType, - tags, - title, -) +from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title from .compel import ConditioningField from .controlnet_image_processors import ControlField from .model import ModelInfo, UNetField, VaeField @@ -65,7 +56,7 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device()) SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))] -@title("Create inpaint mask") +@title("Create Inpaint Mask") @tags("mask", "inpaint") class CreateInpaintMaskInvocation(BaseInvocation): """Creates mask for inpaint model run.""" @@ -85,12 +76,11 @@ class CreateInpaintMaskInvocation(BaseInvocation): def prep_mask_tensor(self, mask_image): if mask_image.mode != "L": - # FIXME: why do we get passed an RGB image here? We can only use single-channel. 
mask_image = mask_image.convert("L") mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) if mask_tensor.dim() == 3: mask_tensor = mask_tensor.unsqueeze(0) - #if shape is not None: + # if shape is not None: # mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR) return mask_tensor @@ -107,7 +97,7 @@ class CreateInpaintMaskInvocation(BaseInvocation): mask = self.prep_mask_tensor( context.services.images.get_pil_image(self.mask.image_name), ) - + if image is not None: vae_info = context.services.model_manager.get_model( **self.vae.vae.dict(), @@ -779,12 +769,8 @@ class ImageToLatentsInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: - # image = context.services.images.get( - # self.image.image_type, self.image.image_name - # ) image = context.services.images.get_pil_image(self.image.image_name) - # vae_info = context.services.model_manager.get_model(**self.vae.vae.dict()) vae_info = context.services.model_manager.get_model( **self.vae.vae.dict(), context=context, From d52a096607895fdd6e53102d830e198e3b3171db Mon Sep 17 00:00:00 2001 From: Mary Hipp Date: Thu, 24 Aug 2023 13:29:53 -0400 Subject: [PATCH 07/20] enable preselected image actions --- .../frontend/web/src/app/components/App.tsx | 18 ++++- .../web/src/app/components/InvokeAIUI.tsx | 11 ++- .../parameters/hooks/usePreselectedImage.ts | 81 +++++++++++++++++++ 3 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index c2cc4645b8..a4a0997443 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -12,24 +12,34 @@ import { languageSelector } from 'features/system/store/systemSelectors'; import InvokeTabs from 'features/ui/components/InvokeTabs'; import i18n from 'i18n'; import { size } from 'lodash-es'; -import { ReactNode, memo, useCallback, useEffect } from 'react'; +import { ReactNode, memo, useCallback, useEffect, useMemo } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import GlobalHotkeys from './GlobalHotkeys'; import Toaster from './Toaster'; +import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage'; const DEFAULT_CONFIG = {}; interface Props { config?: PartialAppConfig; headerComponent?: ReactNode; + selectedImage?: { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; + }; } -const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => { +const App = ({ + config = DEFAULT_CONFIG, + headerComponent, + selectedImage, +}: Props) => { const language = useAppSelector(languageSelector); const logger = useLogger('system'); const dispatch = useAppDispatch(); + const { handlePreselectedImage } = usePreselectedImage(); const handleReset = useCallback(() => { localStorage.clear(); location.reload(); @@ -51,6 +61,10 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => { dispatch(appStarted()); }, [dispatch]); + useEffect(() => { + handlePreselectedImage(selectedImage); + }, [handlePreselectedImage, selectedImage]); + return ( { useEffect(() => { // configure API client token @@ -81,7 +86,11 @@ const InvokeAIUI = ({ }> - + diff --git 
a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts new file mode 100644 index 0000000000..fa310a66ad --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts @@ -0,0 +1,81 @@ +import { skipToken } from '@reduxjs/toolkit/dist/query'; +import { useCallback, useMemo, useState } from 'react'; +import { + useGetImageDTOQuery, + useGetImageMetadataQuery, +} from '../../../services/api/endpoints/images'; +import { useAppDispatch } from '../../../app/store/storeHooks'; +import { setInitialCanvasImage } from '../../canvas/store/canvasSlice'; +import { setActiveTab } from '../../ui/store/uiSlice'; +import { useRecallParameters } from './useRecallParameters'; +import { initialImageSelected } from '../store/actions'; +import { useAppToaster } from '../../../app/components/Toaster'; +import { t } from 'i18next'; + +type SelectedImage = { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; +}; + +export const usePreselectedImage = () => { + const dispatch = useAppDispatch(); + const [imageNameForDto, setImageNameForDto] = useState(); + const [imageNameForMetadata, setImageNameForMetadata] = useState< + string | undefined + >(); + const { recallAllParameters } = useRecallParameters(); + const toaster = useAppToaster(); + + const { currentData: selectedImageDto, isError } = useGetImageDTOQuery( + imageNameForDto ?? skipToken + ); + + const { currentData: selectedImageMetadata } = useGetImageMetadataQuery( + imageNameForMetadata ?? skipToken + ); + + const handlePreselectedImage = useCallback( + (selectedImage?: SelectedImage) => { + if (!selectedImage) { +return; +} + + if (selectedImage.action === 'sendToCanvas') { + setImageNameForDto(selectedImage?.imageName); + if (selectedImageDto) { + dispatch(setInitialCanvasImage(selectedImageDto)); + dispatch(setActiveTab('unifiedCanvas')); + toaster({ + title: t('toast.sentToUnifiedCanvas'), + status: 'info', + duration: 2500, + isClosable: true, + }); + } + } + + if (selectedImage.action === 'sendToImg2Img') { + setImageNameForDto(selectedImage?.imageName); + if (selectedImageDto) { + dispatch(initialImageSelected(selectedImageDto)); + } + } + + if (selectedImage.action === 'useAllParameters') { + setImageNameForMetadata(selectedImage?.imageName); + if (selectedImageMetadata) { + recallAllParameters(selectedImageMetadata.metadata); + } + } + }, + [ + dispatch, + selectedImageDto, + selectedImageMetadata, + recallAllParameters, + toaster, + ] + ); + + return { handlePreselectedImage }; +}; From 382a55afd308d03989da0d71626ccef60d86d048 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 03:07:42 +1200 Subject: [PATCH 08/20] fix: merge conflicts --- .../Invocation/fields/InputFieldRenderer.tsx | 14 ++++++ .../fields/inputs/InpaintMaskInputField.tsx | 6 +-- .../web/src/features/nodes/types/types.ts | 49 ++++++------------- 3 files changed, 32 insertions(+), 37 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx index 9b3ce100c8..176e8a5905 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx +++ 
b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx @@ -13,6 +13,7 @@ import ControlNetModelInputField from './inputs/ControlNetModelInputField'; import EnumInputField from './inputs/EnumInputField'; import ImageCollectionInputField from './inputs/ImageCollectionInputField'; import ImageInputField from './inputs/ImageInputField'; +import InpaintMaskInputField from './inputs/InpaintMaskInputField'; import LatentsInputField from './inputs/LatentsInputField'; import LoRAModelInputField from './inputs/LoRAModelInputField'; import MainModelInputField from './inputs/MainModelInputField'; @@ -105,6 +106,19 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { ); } + if ( + field?.type === 'InpaintMaskField' && + fieldTemplate?.type === 'InpaintMaskField' + ) { + return ( + + ); + } + if ( field?.type === 'ConditioningField' && fieldTemplate?.type === 'ConditioningField' diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx index cf786db3a5..248d5922af 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx @@ -1,13 +1,13 @@ import { - InpaintMaskFieldValue, + FieldComponentProps, InpaintMaskInputFieldTemplate, + InpaintMaskInputFieldValue, } from 'features/nodes/types/types'; import { memo } from 'react'; -import { FieldComponentProps } from './types'; const InpaintMaskInputFieldComponent = ( _props: FieldComponentProps< - InpaintMaskFieldValue, + InpaintMaskInputFieldValue, InpaintMaskInputFieldTemplate > ) => { diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 961ec458c4..11672b2a64 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -109,40 +109,6 @@ export type FieldType = z.infer; export const isFieldType = (value: unknown): value is FieldType => zFieldType.safeParse(value).success; -/** - * An input field is persisted across reloads as part of the user's local state. - * - * An input field has three properties: - * - `id` a unique identifier - * - `name` the name of the field, which comes from the python dataclass - * - `value` the field's value - */ -export type InputFieldValue = - | IntegerInputFieldValue - | SeedInputFieldValue - | FloatInputFieldValue - | StringInputFieldValue - | BooleanInputFieldValue - | ImageInputFieldValue - | InpaintMaskFieldValue - | LatentsInputFieldValue - | ConditioningInputFieldValue - | UNetInputFieldValue - | ClipInputFieldValue - | VaeInputFieldValue - | ControlInputFieldValue - | EnumInputFieldValue - | MainModelInputFieldValue - | SDXLMainModelInputFieldValue - | SDXLRefinerModelInputFieldValue - | VaeModelInputFieldValue - | LoRAModelInputFieldValue - | ControlNetModelInputFieldValue - | CollectionInputFieldValue - | CollectionItemInputFieldValue - | ColorInputFieldValue - | ImageCollectionInputFieldValue; - /** * An input field template is generated on each page load from the OpenAPI schema. 
* @@ -241,6 +207,12 @@ export const zConditioningField = z.object({ }); export type ConditioningField = z.infer; +export const zInpaintMaskField = z.object({ + mask_name: z.string().trim().min(1), + masked_latents_name: z.string().trim().min(1).optional(), +}); +export type InpaintMaskFieldValue = z.infer; + export const zIntegerInputFieldValue = zInputFieldValueBase.extend({ type: z.literal('integer'), value: z.number().optional(), @@ -277,6 +249,14 @@ export const zLatentsInputFieldValue = zInputFieldValueBase.extend({ }); export type LatentsInputFieldValue = z.infer; +export const zInpaintMaskInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('InpaintMaskField'), + value: zInpaintMaskField.optional(), +}); +export type InpaintMaskInputFieldValue = z.infer< + typeof zInpaintMaskInputFieldValue +>; + export const zConditioningInputFieldValue = zInputFieldValueBase.extend({ type: z.literal('ConditioningField'), value: zConditioningField.optional(), @@ -495,6 +475,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [ zBooleanInputFieldValue, zImageInputFieldValue, zLatentsInputFieldValue, + zInpaintMaskInputFieldValue, zConditioningInputFieldValue, zUNetInputFieldValue, zClipInputFieldValue, From af3e316cee1c8576177aee088b6e3d70781eda4d Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 03:12:03 +1200 Subject: [PATCH 09/20] chore: Regen schema --- .../frontend/web/src/services/api/schema.d.ts | 1945 ++++++----------- 1 file changed, 702 insertions(+), 1243 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index ee5a3f86e3..71a14c9bd3 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -3,301 +3,302 @@ * Do not make direct changes to the file. 
*/ + export type paths = { - '/api/v1/sessions/': { + "/api/v1/sessions/": { /** * List Sessions * @description Gets a list of sessions, optionally searching */ - get: operations['list_sessions']; + get: operations["list_sessions"]; /** * Create Session * @description Creates a new session, optionally initializing it with an invocation graph */ - post: operations['create_session']; + post: operations["create_session"]; }; - '/api/v1/sessions/{session_id}': { + "/api/v1/sessions/{session_id}": { /** * Get Session * @description Gets a session */ - get: operations['get_session']; + get: operations["get_session"]; }; - '/api/v1/sessions/{session_id}/nodes': { + "/api/v1/sessions/{session_id}/nodes": { /** * Add Node * @description Adds a node to the graph */ - post: operations['add_node']; + post: operations["add_node"]; }; - '/api/v1/sessions/{session_id}/nodes/{node_path}': { + "/api/v1/sessions/{session_id}/nodes/{node_path}": { /** * Update Node * @description Updates a node in the graph and removes all linked edges */ - put: operations['update_node']; + put: operations["update_node"]; /** * Delete Node * @description Deletes a node in the graph and removes all linked edges */ - delete: operations['delete_node']; + delete: operations["delete_node"]; }; - '/api/v1/sessions/{session_id}/edges': { + "/api/v1/sessions/{session_id}/edges": { /** * Add Edge * @description Adds an edge to the graph */ - post: operations['add_edge']; + post: operations["add_edge"]; }; - '/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}': { + "/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}": { /** * Delete Edge * @description Deletes an edge from the graph */ - delete: operations['delete_edge']; + delete: operations["delete_edge"]; }; - '/api/v1/sessions/{session_id}/invoke': { + "/api/v1/sessions/{session_id}/invoke": { /** * Invoke Session * @description Invokes a session */ - put: operations['invoke_session']; + put: operations["invoke_session"]; /** * Cancel Session Invoke * @description Invokes a session */ - delete: operations['cancel_session_invoke']; + delete: operations["cancel_session_invoke"]; }; - '/api/v1/models/': { + "/api/v1/models/": { /** * List Models * @description Gets a list of models */ - get: operations['list_models']; + get: operations["list_models"]; }; - '/api/v1/models/{base_model}/{model_type}/{model_name}': { + "/api/v1/models/{base_model}/{model_type}/{model_name}": { /** * Delete Model * @description Delete Model */ - delete: operations['del_model']; + delete: operations["del_model"]; /** * Update Model * @description Update model contents with a new config. If the model name or base fields are changed, then the model is renamed. */ - patch: operations['update_model']; + patch: operations["update_model"]; }; - '/api/v1/models/import': { + "/api/v1/models/import": { /** * Import Model * @description Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically */ - post: operations['import_model']; + post: operations["import_model"]; }; - '/api/v1/models/add': { + "/api/v1/models/add": { /** * Add Model * @description Add a model using the configuration information appropriate for its type. 
Only local models can be added by path */ - post: operations['add_model']; + post: operations["add_model"]; }; - '/api/v1/models/convert/{base_model}/{model_type}/{model_name}': { + "/api/v1/models/convert/{base_model}/{model_type}/{model_name}": { /** * Convert Model * @description Convert a checkpoint model into a diffusers model, optionally saving to the indicated destination directory, or `models` if none. */ - put: operations['convert_model']; + put: operations["convert_model"]; }; - '/api/v1/models/search': { + "/api/v1/models/search": { /** Search For Models */ - get: operations['search_for_models']; + get: operations["search_for_models"]; }; - '/api/v1/models/ckpt_confs': { + "/api/v1/models/ckpt_confs": { /** * List Ckpt Configs * @description Return a list of the legacy checkpoint configuration files stored in `ROOT/configs/stable-diffusion`, relative to ROOT. */ - get: operations['list_ckpt_configs']; + get: operations["list_ckpt_configs"]; }; - '/api/v1/models/sync': { + "/api/v1/models/sync": { /** * Sync To Config * @description Call after making changes to models.yaml, autoimport directories or models directory to synchronize * in-memory data structures with disk data structures. */ - post: operations['sync_to_config']; + post: operations["sync_to_config"]; }; - '/api/v1/models/merge/{base_model}': { + "/api/v1/models/merge/{base_model}": { /** * Merge Models * @description Convert a checkpoint model into a diffusers model */ - put: operations['merge_models']; + put: operations["merge_models"]; }; - '/api/v1/images/upload': { + "/api/v1/images/upload": { /** * Upload Image * @description Uploads an image */ - post: operations['upload_image']; + post: operations["upload_image"]; }; - '/api/v1/images/i/{image_name}': { + "/api/v1/images/i/{image_name}": { /** * Get Image Dto * @description Gets an image's DTO */ - get: operations['get_image_dto']; + get: operations["get_image_dto"]; /** * Delete Image * @description Deletes an image */ - delete: operations['delete_image']; + delete: operations["delete_image"]; /** * Update Image * @description Updates an image */ - patch: operations['update_image']; + patch: operations["update_image"]; }; - '/api/v1/images/clear-intermediates': { + "/api/v1/images/clear-intermediates": { /** * Clear Intermediates * @description Clears all intermediates */ - post: operations['clear_intermediates']; + post: operations["clear_intermediates"]; }; - '/api/v1/images/i/{image_name}/metadata': { + "/api/v1/images/i/{image_name}/metadata": { /** * Get Image Metadata * @description Gets an image's metadata */ - get: operations['get_image_metadata']; + get: operations["get_image_metadata"]; }; - '/api/v1/images/i/{image_name}/full': { + "/api/v1/images/i/{image_name}/full": { /** * Get Image Full * @description Gets a full-resolution image file */ - get: operations['get_image_full']; + get: operations["get_image_full"]; /** * Get Image Full * @description Gets a full-resolution image file */ - head: operations['get_image_full']; + head: operations["get_image_full"]; }; - '/api/v1/images/i/{image_name}/thumbnail': { + "/api/v1/images/i/{image_name}/thumbnail": { /** * Get Image Thumbnail * @description Gets a thumbnail image file */ - get: operations['get_image_thumbnail']; + get: operations["get_image_thumbnail"]; }; - '/api/v1/images/i/{image_name}/urls': { + "/api/v1/images/i/{image_name}/urls": { /** * Get Image Urls * @description Gets an image and thumbnail URL */ - get: operations['get_image_urls']; + get: operations["get_image_urls"]; }; - 
'/api/v1/images/': { + "/api/v1/images/": { /** * List Image Dtos * @description Gets a list of image DTOs */ - get: operations['list_image_dtos']; + get: operations["list_image_dtos"]; }; - '/api/v1/images/delete': { + "/api/v1/images/delete": { /** Delete Images From List */ - post: operations['delete_images_from_list']; + post: operations["delete_images_from_list"]; }; - '/api/v1/images/star': { + "/api/v1/images/star": { /** Star Images In List */ - post: operations['star_images_in_list']; + post: operations["star_images_in_list"]; }; - '/api/v1/images/unstar': { + "/api/v1/images/unstar": { /** Unstar Images In List */ - post: operations['unstar_images_in_list']; + post: operations["unstar_images_in_list"]; }; - '/api/v1/boards/': { + "/api/v1/boards/": { /** * List Boards * @description Gets a list of boards */ - get: operations['list_boards']; + get: operations["list_boards"]; /** * Create Board * @description Creates a board */ - post: operations['create_board']; + post: operations["create_board"]; }; - '/api/v1/boards/{board_id}': { + "/api/v1/boards/{board_id}": { /** * Get Board * @description Gets a board */ - get: operations['get_board']; + get: operations["get_board"]; /** * Delete Board * @description Deletes a board */ - delete: operations['delete_board']; + delete: operations["delete_board"]; /** * Update Board * @description Updates a board */ - patch: operations['update_board']; + patch: operations["update_board"]; }; - '/api/v1/boards/{board_id}/image_names': { + "/api/v1/boards/{board_id}/image_names": { /** * List All Board Image Names * @description Gets a list of images for a board */ - get: operations['list_all_board_image_names']; + get: operations["list_all_board_image_names"]; }; - '/api/v1/board_images/': { + "/api/v1/board_images/": { /** * Add Image To Board * @description Creates a board_image */ - post: operations['add_image_to_board']; + post: operations["add_image_to_board"]; /** * Remove Image From Board * @description Removes an image from its board, if it had one */ - delete: operations['remove_image_from_board']; + delete: operations["remove_image_from_board"]; }; - '/api/v1/board_images/batch': { + "/api/v1/board_images/batch": { /** * Add Images To Board * @description Adds a list of images to a board */ - post: operations['add_images_to_board']; + post: operations["add_images_to_board"]; }; - '/api/v1/board_images/batch/delete': { + "/api/v1/board_images/batch/delete": { /** * Remove Images From Board * @description Removes a list of images from their board, if they had one */ - post: operations['remove_images_from_board']; + post: operations["remove_images_from_board"]; }; - '/api/v1/app/version': { + "/api/v1/app/version": { /** Get Version */ - get: operations['app_version']; + get: operations["app_version"]; }; - '/api/v1/app/config': { + "/api/v1/app/config": { /** Get Config */ - get: operations['get_config']; + get: operations["get_config"]; }; - '/api/v1/app/logging': { + "/api/v1/app/logging": { /** * Get Log Level * @description Returns the log level */ - get: operations['get_log_level']; + get: operations["get_log_level"]; /** * Set Log Level * @description Sets the log verbosity level */ - post: operations['set_log_level']; + post: operations["set_log_level"]; }; }; @@ -339,7 +340,7 @@ export type components = { * @default add * @enum {string} */ - type: 'add'; + type: "add"; /** * A * @description The first number @@ -367,7 +368,7 @@ export type components = { * Upscaling Methods * @description List of upscaling methods */ - 
upscaling_methods: components['schemas']['Upscaler'][]; + upscaling_methods: components["schemas"]["Upscaler"][]; /** * Nsfw Methods * @description List of NSFW checking methods @@ -395,7 +396,7 @@ export type components = { * @description An enumeration. * @enum {string} */ - BaseModelType: 'sd-1' | 'sd-2' | 'sdxl' | 'sdxl-refiner'; + BaseModelType: "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner"; /** * Blank Image * @description Creates a blank image and forwards it to the pipeline @@ -417,7 +418,7 @@ export type components = { * @default blank_image * @enum {string} */ - type: 'blank_image'; + type: "blank_image"; /** * Width * @description The width of the image @@ -436,7 +437,7 @@ export type components = { * @default RGB * @enum {string} */ - mode?: 'RGB' | 'RGBA'; + mode?: "RGB" | "RGBA"; /** * Color * @description The color of the image @@ -447,7 +448,7 @@ export type components = { * "a": 255 * } */ - color?: components['schemas']['ColorField']; + color?: components["schemas"]["ColorField"]; }; /** * Blend Latents @@ -470,17 +471,17 @@ export type components = { * @default lblend * @enum {string} */ - type: 'lblend'; + type: "lblend"; /** * Latents A * @description Latents tensor */ - latents_a?: components['schemas']['LatentsField']; + latents_a?: components["schemas"]["LatentsField"]; /** * Latents B * @description Latents tensor */ - latents_b?: components['schemas']['LatentsField']; + latents_b?: components["schemas"]["LatentsField"]; /** * Alpha * @description Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B. @@ -589,7 +590,7 @@ export type components = { * @default v_prediction * @enum {string} */ - prediction_type?: 'v_prediction' | 'epsilon' | 'sample'; + prediction_type?: "v_prediction" | "epsilon" | "sample"; }; /** Body_merge_models */ Body_merge_models: { @@ -610,7 +611,7 @@ export type components = { */ alpha?: number; /** @description Interpolation method */ - interp: components['schemas']['MergeInterpolationMethod']; + interp: components["schemas"]["MergeInterpolationMethod"]; /** * Force * @description Force merging of models created with different versions of diffusers @@ -684,7 +685,7 @@ export type components = { * @default boolean_collection * @enum {string} */ - type: 'boolean_collection'; + type: "boolean_collection"; /** * Collection * @description The collection of boolean values @@ -701,7 +702,7 @@ export type components = { * @default boolean_collection_output * @enum {string} */ - type: 'boolean_collection_output'; + type: "boolean_collection_output"; /** * Collection * @description The output boolean collection @@ -729,7 +730,7 @@ export type components = { * @default boolean * @enum {string} */ - type: 'boolean'; + type: "boolean"; /** * Value * @description The boolean value @@ -747,7 +748,7 @@ export type components = { * @default boolean_output * @enum {string} */ - type: 'boolean_output'; + type: "boolean_output"; /** * Value * @description The output boolean @@ -775,12 +776,12 @@ export type components = { * @default canny_image_processor * @enum {string} */ - type: 'canny_image_processor'; + type: "canny_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -800,12 +801,12 @@ export type components = { * Tokenizer * @description Info to load tokenizer submodel */ - tokenizer: 
components['schemas']['ModelInfo']; + tokenizer: components["schemas"]["ModelInfo"]; /** * Text Encoder * @description Info to load text_encoder submodel */ - text_encoder: components['schemas']['ModelInfo']; + text_encoder: components["schemas"]["ModelInfo"]; /** * Skipped Layers * @description Number of skipped layers in text_encoder @@ -815,7 +816,7 @@ export type components = { * Loras * @description Loras to apply on model loading */ - loras: components['schemas']['LoraInfo'][]; + loras: components["schemas"]["LoraInfo"][]; }; /** * CLIP Skip @@ -838,12 +839,12 @@ export type components = { * @default clip_skip * @enum {string} */ - type: 'clip_skip'; + type: "clip_skip"; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; /** * Skipped Layers * @description Number of layers to skip in text encoder @@ -861,12 +862,12 @@ export type components = { * @default clip_skip_output * @enum {string} */ - type: 'clip_skip_output'; + type: "clip_skip_output"; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; }; /** * CollectInvocation @@ -889,7 +890,7 @@ export type components = { * @default collect * @enum {string} */ - type: 'collect'; + type: "collect"; /** * Collection Item * @description The item to collect (all inputs must be of the same type) @@ -911,7 +912,7 @@ export type components = { * @default collect_output * @enum {string} */ - type: 'collect_output'; + type: "collect_output"; /** * Collection * @description The collection of input items @@ -928,12 +929,12 @@ export type components = { * @default color_collection_output * @enum {string} */ - type: 'color_collection_output'; + type: "color_collection_output"; /** * Collection * @description The output colors */ - collection: components['schemas']['ColorField'][]; + collection: components["schemas"]["ColorField"][]; }; /** * Color Correct @@ -957,22 +958,22 @@ export type components = { * @default color_correct * @enum {string} */ - type: 'color_correct'; + type: "color_correct"; /** * Image * @description The image to color-correct */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Reference * @description Reference image for color-correction */ - reference?: components['schemas']['ImageField']; + reference?: components["schemas"]["ImageField"]; /** * Mask * @description Mask to use when applying color-correction */ - mask?: components['schemas']['ImageField']; + mask?: components["schemas"]["ImageField"]; /** * Mask Blur Radius * @description Mask blur radius @@ -1027,7 +1028,7 @@ export type components = { * @default color * @enum {string} */ - type: 'color'; + type: "color"; /** * Color * @description The color value @@ -1038,7 +1039,7 @@ export type components = { * "a": 255 * } */ - color?: components['schemas']['ColorField']; + color?: components["schemas"]["ColorField"]; }; /** * ColorOutput @@ -1050,12 +1051,12 @@ export type components = { * @default color_output * @enum {string} */ - type: 'color_output'; + type: "color_output"; /** * Color * @description The output color */ - color: components['schemas']['ColorField']; + color: components["schemas"]["ColorField"]; }; /** * Compel Prompt @@ -1078,7 +1079,7 @@ export type components = { * @default compel * @enum {string} */ - type: 'compel'; + type: "compel"; /** * 
Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -1089,7 +1090,7 @@ export type components = { * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; }; /** * Conditioning Primitive Collection @@ -1112,13 +1113,13 @@ export type components = { * @default conditioning_collection * @enum {string} */ - type: 'conditioning_collection'; + type: "conditioning_collection"; /** * Collection * @description The collection of conditioning tensors * @default 0 */ - collection?: components['schemas']['ConditioningField'][]; + collection?: components["schemas"]["ConditioningField"][]; }; /** * ConditioningCollectionOutput @@ -1130,12 +1131,12 @@ export type components = { * @default conditioning_collection_output * @enum {string} */ - type: 'conditioning_collection_output'; + type: "conditioning_collection_output"; /** * Collection * @description The output conditioning tensors */ - collection: components['schemas']['ConditioningField'][]; + collection: components["schemas"]["ConditioningField"][]; }; /** * ConditioningField @@ -1169,12 +1170,12 @@ export type components = { * @default conditioning * @enum {string} */ - type: 'conditioning'; + type: "conditioning"; /** * Conditioning * @description Conditioning tensor */ - conditioning?: components['schemas']['ConditioningField']; + conditioning?: components["schemas"]["ConditioningField"]; }; /** * ConditioningOutput @@ -1186,12 +1187,12 @@ export type components = { * @default conditioning_output * @enum {string} */ - type: 'conditioning_output'; + type: "conditioning_output"; /** * Conditioning * @description Conditioning tensor */ - conditioning: components['schemas']['ConditioningField']; + conditioning: components["schemas"]["ConditioningField"]; }; /** * Content Shuffle Processor @@ -1214,12 +1215,12 @@ export type components = { * @default content_shuffle_image_processor * @enum {string} */ - type: 'content_shuffle_image_processor'; + type: "content_shuffle_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -1257,12 +1258,12 @@ export type components = { * Image * @description The control image */ - image: components['schemas']['ImageField']; + image: components["schemas"]["ImageField"]; /** * Control Model * @description The ControlNet model to use */ - control_model: components['schemas']['ControlNetModelField']; + control_model: components["schemas"]["ControlNetModelField"]; /** * Control Weight * @description The weight given to the ControlNet @@ -1287,18 +1288,14 @@ export type components = { * @default balanced * @enum {string} */ - control_mode?: 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced'; + control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; /** * Resize Mode * @description The resize mode to use * @default just_resize * @enum {string} */ - resize_mode?: - | 'just_resize' - | 'crop_resize' - | 'fill_resize' - | 'just_resize_simple'; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; /** * ControlNet @@ -1321,18 +1318,18 @@ export type components = { * @default controlnet * @enum {string} */ - type: 'controlnet'; + type: "controlnet"; /** * Image * @description The control image */ - image?: 
components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Control Model * @description ControlNet model to load * @default lllyasviel/sd-controlnet-canny */ - control_model?: components['schemas']['ControlNetModelField']; + control_model?: components["schemas"]["ControlNetModelField"]; /** * Control Weight * @description The weight given to the ControlNet @@ -1357,29 +1354,25 @@ export type components = { * @default balanced * @enum {string} */ - control_mode?: 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced'; + control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; /** * Resize Mode * @description The resize mode used * @default just_resize * @enum {string} */ - resize_mode?: - | 'just_resize' - | 'crop_resize' - | 'fill_resize' - | 'just_resize_simple'; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; /** ControlNetModelCheckpointConfig */ ControlNetModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'controlnet'; + model_type: "controlnet"; /** Path */ path: string; /** Description */ @@ -1388,8 +1381,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'checkpoint'; - error?: components['schemas']['ModelError']; + model_format: "checkpoint"; + error?: components["schemas"]["ModelError"]; /** Config */ config: string; }; @@ -1397,12 +1390,12 @@ export type components = { ControlNetModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'controlnet'; + model_type: "controlnet"; /** Path */ path: string; /** Description */ @@ -1411,8 +1404,8 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'diffusers'; - error?: components['schemas']['ModelError']; + model_format: "diffusers"; + error?: components["schemas"]["ModelError"]; }; /** * ControlNetModelField @@ -1425,7 +1418,7 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; }; /** * ControlOutput @@ -1437,12 +1430,12 @@ export type components = { * @default control_output * @enum {string} */ - type: 'control_output'; + type: "control_output"; /** * Control * @description ControlNet(s) to apply */ - control: components['schemas']['ControlField']; + control: components["schemas"]["ControlField"]; }; /** * CoreMetadata @@ -1519,22 +1512,22 @@ export type components = { * Model * @description The main model used for inference */ - model: components['schemas']['MainModelField']; + model: components["schemas"]["MainModelField"]; /** * Controlnets * @description The ControlNets used for inference */ - controlnets: components['schemas']['ControlField'][]; + controlnets: components["schemas"]["ControlField"][]; /** * Loras * @description The LoRAs used for inference */ - loras: components['schemas']['LoRAMetadataField'][]; + loras: components["schemas"]["LoRAMetadataField"][]; /** * Vae * @description The VAE used for decoding, if the main model's default was not used */ - vae?: components['schemas']['VAEModelField']; + vae?: components["schemas"]["VAEModelField"]; /** * Strength * @description The strength used for 
latents-to-latents @@ -1559,7 +1552,7 @@ export type components = { * Refiner Model * @description The SDXL Refiner model used */ - refiner_model?: components['schemas']['MainModelField']; + refiner_model?: components["schemas"]["MainModelField"]; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner @@ -1591,6 +1584,56 @@ export type components = { */ refiner_start?: number; }; + /** + * Create Inpaint Mask + * @description Creates mask for inpaint model run. + */ + CreateInpaintMaskInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. + * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default create_inpaint_mask + * @enum {string} + */ + type: "create_inpaint_mask"; + /** + * Image + * @description Image which will be inpainted + */ + image?: components["schemas"]["ImageField"]; + /** + * Mask + * @description The mask to use when pasting + */ + mask?: components["schemas"]["ImageField"]; + /** + * Vae + * @description VAE + */ + vae?: components["schemas"]["VaeField"]; + /** + * Tiled + * @description Processing using overlapping tiles (reduce memory consumption) + * @default false + */ + tiled?: boolean; + /** + * Fp32 + * @description Whether or not to use full float32 precision + * @default false + */ + fp32?: boolean; + }; /** * OpenCV Inpaint * @description Simple inpaint using opencv. @@ -1612,17 +1655,17 @@ export type components = { * @default cv_inpaint * @enum {string} */ - type: 'cv_inpaint'; + type: "cv_inpaint"; /** * Image * @description The image to inpaint */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Mask * @description The mask to use when inpainting */ - mask?: components['schemas']['ImageField']; + mask?: components["schemas"]["ImageField"]; }; /** DeleteBoardResult */ DeleteBoardResult: { @@ -1668,12 +1711,12 @@ export type components = { * @default denoise_latents * @enum {string} */ - type: 'denoise_latents'; + type: "denoise_latents"; /** * Noise * @description Noise tensor */ - noise?: components['schemas']['LatentsField']; + noise?: components["schemas"]["LatentsField"]; /** * Steps * @description Number of steps to run @@ -1704,61 +1747,37 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: - | 'ddim' - | 'ddpm' - | 'deis' - | 'lms' - | 'lms_k' - | 'pndm' - | 'heun' - | 'heun_k' - | 'euler' - | 'euler_k' - | 'euler_a' - | 'kdpm_2' - | 'kdpm_2_a' - | 'dpmpp_2s' - | 'dpmpp_2s_k' - | 'dpmpp_2m' - | 'dpmpp_2m_k' - | 'dpmpp_2m_sde' - | 'dpmpp_2m_sde_k' - | 'dpmpp_sde' - | 'dpmpp_sde_k' - | 'unipc'; + scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** * Control * @description ControlNet(s) to apply */ - control?: - | components['schemas']['ControlField'] - | components['schemas']['ControlField'][]; + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; /** * Latents * @description Latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; /** * Mask * @description The mask to use for the operation */ - mask?: 
components['schemas']['InpaintMaskField']; + mask?: components["schemas"]["InpaintMaskField"]; /** * Positive Conditioning * @description Positive conditioning tensor */ - positive_conditioning?: components['schemas']['ConditioningField']; + positive_conditioning?: components["schemas"]["ConditioningField"]; /** * Negative Conditioning * @description Negative conditioning tensor */ - negative_conditioning?: components['schemas']['ConditioningField']; + negative_conditioning?: components["schemas"]["ConditioningField"]; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; }; /** * Divide Integers @@ -1781,7 +1800,7 @@ export type components = { * @default div * @enum {string} */ - type: 'div'; + type: "div"; /** * A * @description The first number @@ -1816,7 +1835,7 @@ export type components = { * @default dynamic_prompt * @enum {string} */ - type: 'dynamic_prompt'; + type: "dynamic_prompt"; /** * Prompt * @description The prompt to parse with dynamicprompts @@ -1856,23 +1875,19 @@ export type components = { * @default esrgan * @enum {string} */ - type: 'esrgan'; + type: "esrgan"; /** * Image * @description The input image */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Model Name * @description The Real-ESRGAN model to use * @default RealESRGAN_x4plus.pth * @enum {string} */ - model_name?: - | 'RealESRGAN_x4plus.pth' - | 'RealESRGAN_x4plus_anime_6B.pth' - | 'ESRGAN_SRx4_DF2KOST_official-ff704c30.pth' - | 'RealESRGAN_x2plus.pth'; + model_name?: "RealESRGAN_x4plus.pth" | "RealESRGAN_x4plus_anime_6B.pth" | "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth" | "RealESRGAN_x2plus.pth"; }; /** Edge */ Edge: { @@ -1880,12 +1895,12 @@ export type components = { * Source * @description The connection for the edge's from node and field */ - source: components['schemas']['EdgeConnection']; + source: components["schemas"]["EdgeConnection"]; /** * Destination * @description The connection for the edge's to node and field */ - destination: components['schemas']['EdgeConnection']; + destination: components["schemas"]["EdgeConnection"]; }; /** EdgeConnection */ EdgeConnection: { @@ -1921,7 +1936,7 @@ export type components = { * @default float_collection * @enum {string} */ - type: 'float_collection'; + type: "float_collection"; /** * Collection * @description The collection of float values @@ -1938,7 +1953,7 @@ export type components = { * @default float_collection_output * @enum {string} */ - type: 'float_collection_output'; + type: "float_collection_output"; /** * Collection * @description The float collection @@ -1966,7 +1981,7 @@ export type components = { * @default float * @enum {string} */ - type: 'float'; + type: "float"; /** * Value * @description The float value @@ -1995,7 +2010,7 @@ export type components = { * @default float_range * @enum {string} */ - type: 'float_range'; + type: "float_range"; /** * Start * @description The first value of the range @@ -2025,7 +2040,7 @@ export type components = { * @default float_output * @enum {string} */ - type: 'float_output'; + type: "float_output"; /** * Value * @description The output float @@ -2044,109 +2059,13 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: - | components['schemas']['BooleanInvocation'] - | components['schemas']['BooleanCollectionInvocation'] - | components['schemas']['IntegerInvocation'] - | components['schemas']['IntegerCollectionInvocation'] - | 
components['schemas']['FloatInvocation'] - | components['schemas']['FloatCollectionInvocation'] - | components['schemas']['StringInvocation'] - | components['schemas']['StringCollectionInvocation'] - | components['schemas']['ImageInvocation'] - | components['schemas']['ImageCollectionInvocation'] - | components['schemas']['LatentsInvocation'] - | components['schemas']['LatentsCollectionInvocation'] - | components['schemas']['ColorInvocation'] - | components['schemas']['ConditioningInvocation'] - | components['schemas']['ConditioningCollectionInvocation'] - | components['schemas']['ControlNetInvocation'] - | components['schemas']['ImageProcessorInvocation'] - | components['schemas']['MainModelLoaderInvocation'] - | components['schemas']['LoraLoaderInvocation'] - | components['schemas']['SDXLLoraLoaderInvocation'] - | components['schemas']['VaeLoaderInvocation'] - | components['schemas']['MetadataAccumulatorInvocation'] - | components['schemas']['RangeInvocation'] - | components['schemas']['RangeOfSizeInvocation'] - | components['schemas']['RandomRangeInvocation'] - | components['schemas']['CompelInvocation'] - | components['schemas']['SDXLCompelPromptInvocation'] - | components['schemas']['SDXLRefinerCompelPromptInvocation'] - | components['schemas']['ClipSkipInvocation'] - | components['schemas']['CvInpaintInvocation'] - | components['schemas']['ShowImageInvocation'] - | components['schemas']['BlankImageInvocation'] - | components['schemas']['ImageCropInvocation'] - | components['schemas']['ImagePasteInvocation'] - | components['schemas']['MaskFromAlphaInvocation'] - | components['schemas']['ImageMultiplyInvocation'] - | components['schemas']['ImageChannelInvocation'] - | components['schemas']['ImageConvertInvocation'] - | components['schemas']['ImageBlurInvocation'] - | components['schemas']['ImageResizeInvocation'] - | components['schemas']['ImageScaleInvocation'] - | components['schemas']['ImageLerpInvocation'] - | components['schemas']['ImageInverseLerpInvocation'] - | components['schemas']['ImageNSFWBlurInvocation'] - | components['schemas']['ImageWatermarkInvocation'] - | components['schemas']['MaskEdgeInvocation'] - | components['schemas']['MaskCombineInvocation'] - | components['schemas']['ColorCorrectInvocation'] - | components['schemas']['ImageHueAdjustmentInvocation'] - | components['schemas']['ImageLuminosityAdjustmentInvocation'] - | components['schemas']['ImageSaturationAdjustmentInvocation'] - | components['schemas']['InfillColorInvocation'] - | components['schemas']['InfillTileInvocation'] - | components['schemas']['InfillPatchMatchInvocation'] - | components['schemas']['LaMaInfillInvocation'] - | components['schemas']['DenoiseLatentsInvocation'] - | components['schemas']['LatentsToImageInvocation'] - | components['schemas']['ResizeLatentsInvocation'] - | components['schemas']['ScaleLatentsInvocation'] - | components['schemas']['ImageToLatentsInvocation'] - | components['schemas']['BlendLatentsInvocation'] - | components['schemas']['AddInvocation'] - | components['schemas']['SubtractInvocation'] - | components['schemas']['MultiplyInvocation'] - | components['schemas']['DivideInvocation'] - | components['schemas']['RandomIntInvocation'] - | components['schemas']['NoiseInvocation'] - | components['schemas']['ONNXPromptInvocation'] - | components['schemas']['ONNXTextToLatentsInvocation'] - | components['schemas']['ONNXLatentsToImageInvocation'] - | components['schemas']['OnnxModelLoaderInvocation'] - | components['schemas']['FloatLinearRangeInvocation'] - | 
components['schemas']['StepParamEasingInvocation'] - | components['schemas']['DynamicPromptInvocation'] - | components['schemas']['PromptsFromFileInvocation'] - | components['schemas']['SDXLModelLoaderInvocation'] - | components['schemas']['SDXLRefinerModelLoaderInvocation'] - | components['schemas']['ESRGANInvocation'] - | components['schemas']['GraphInvocation'] - | components['schemas']['IterateInvocation'] - | components['schemas']['CollectInvocation'] - | components['schemas']['CannyImageProcessorInvocation'] - | components['schemas']['HedImageProcessorInvocation'] - | components['schemas']['LineartImageProcessorInvocation'] - | components['schemas']['LineartAnimeImageProcessorInvocation'] - | components['schemas']['OpenposeImageProcessorInvocation'] - | components['schemas']['MidasDepthImageProcessorInvocation'] - | components['schemas']['NormalbaeImageProcessorInvocation'] - | components['schemas']['MlsdImageProcessorInvocation'] - | components['schemas']['PidiImageProcessorInvocation'] - | components['schemas']['ContentShuffleImageProcessorInvocation'] - | components['schemas']['ZoeDepthImageProcessorInvocation'] - | components['schemas']['MediapipeFaceProcessorInvocation'] - | components['schemas']['LeresImageProcessorInvocation'] - | components['schemas']['TileResamplerProcessorInvocation'] - | components['schemas']['SegmentAnythingProcessorInvocation']; + [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | 
components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; /** * Edges * @description The connections between nodes and their fields in this graph */ - edges?: components['schemas']['Edge'][]; + edges?: components["schemas"]["Edge"][]; }; /** * GraphExecutionState @@ -2162,12 +2081,12 @@ export type components = { * Graph * @description The graph being executed */ - graph: components['schemas']['Graph']; + graph: components["schemas"]["Graph"]; /** * Execution Graph * @description The expanded graph of activated and executed nodes */ - execution_graph: components['schemas']['Graph']; + execution_graph: components["schemas"]["Graph"]; /** * Executed * @description The set of node ids that have been executed @@ -2183,37 +2102,7 @@ export 
type components = { * @description The results of node executions */ results: { - [key: string]: - | components['schemas']['BooleanOutput'] - | components['schemas']['BooleanCollectionOutput'] - | components['schemas']['IntegerOutput'] - | components['schemas']['IntegerCollectionOutput'] - | components['schemas']['FloatOutput'] - | components['schemas']['FloatCollectionOutput'] - | components['schemas']['StringOutput'] - | components['schemas']['StringCollectionOutput'] - | components['schemas']['ImageOutput'] - | components['schemas']['ImageCollectionOutput'] - | components['schemas']['LatentsOutput'] - | components['schemas']['LatentsCollectionOutput'] - | components['schemas']['ColorOutput'] - | components['schemas']['ColorCollectionOutput'] - | components['schemas']['ConditioningOutput'] - | components['schemas']['ConditioningCollectionOutput'] - | components['schemas']['ControlOutput'] - | components['schemas']['ModelLoaderOutput'] - | components['schemas']['LoraLoaderOutput'] - | components['schemas']['SDXLLoraLoaderOutput'] - | components['schemas']['VaeLoaderOutput'] - | components['schemas']['MetadataAccumulatorOutput'] - | components['schemas']['ClipSkipInvocationOutput'] - | components['schemas']['NoiseOutput'] - | components['schemas']['ONNXModelLoaderOutput'] - | components['schemas']['SDXLModelLoaderOutput'] - | components['schemas']['SDXLRefinerModelLoaderOutput'] - | components['schemas']['GraphInvocationOutput'] - | components['schemas']['IterateInvocationOutput'] - | components['schemas']['CollectInvocationOutput']; + [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["InpaintMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]; }; /** * Errors @@ -2258,12 +2147,12 @@ export type components = { * @default graph * @enum {string} */ - type: 'graph'; + type: "graph"; /** * Graph * @description The graph to run */ - graph?: components['schemas']['Graph']; + graph?: components["schemas"]["Graph"]; }; /** * GraphInvocationOutput @@ -2275,12 +2164,12 @@ export type components = { * @default graph_output * @enum {string} */ - type: 'graph_output'; + type: "graph_output"; }; /** HTTPValidationError */ HTTPValidationError: { /** Detail */ - detail?: 
components['schemas']['ValidationError'][]; + detail?: components["schemas"]["ValidationError"][]; }; /** * HED (softedge) Processor @@ -2303,12 +2192,12 @@ export type components = { * @default hed_image_processor * @enum {string} */ - type: 'hed_image_processor'; + type: "hed_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -2349,12 +2238,12 @@ export type components = { * @default img_blur * @enum {string} */ - type: 'img_blur'; + type: "img_blur"; /** * Image * @description The image to blur */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Radius * @description The blur radius @@ -2367,7 +2256,7 @@ export type components = { * @default gaussian * @enum {string} */ - blur_type?: 'gaussian' | 'box'; + blur_type?: "gaussian" | "box"; }; /** * ImageCategory @@ -2380,7 +2269,7 @@ export type components = { * - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes. * @enum {string} */ - ImageCategory: 'general' | 'mask' | 'control' | 'user' | 'other'; + ImageCategory: "general" | "mask" | "control" | "user" | "other"; /** * Extract Image Channel * @description Gets a channel from an image. @@ -2402,19 +2291,19 @@ export type components = { * @default img_chan * @enum {string} */ - type: 'img_chan'; + type: "img_chan"; /** * Image * @description The image to get the channel from */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Channel * @description The channel to get * @default A * @enum {string} */ - channel?: 'A' | 'R' | 'G' | 'B'; + channel?: "A" | "R" | "G" | "B"; }; /** * Image Primitive Collection @@ -2437,13 +2326,13 @@ export type components = { * @default image_collection * @enum {string} */ - type: 'image_collection'; + type: "image_collection"; /** * Collection * @description The collection of image values * @default 0 */ - collection?: components['schemas']['ImageField'][]; + collection?: components["schemas"]["ImageField"][]; }; /** * ImageCollectionOutput @@ -2455,12 +2344,12 @@ export type components = { * @default image_collection_output * @enum {string} */ - type: 'image_collection_output'; + type: "image_collection_output"; /** * Collection * @description The output images */ - collection: components['schemas']['ImageField'][]; + collection: components["schemas"]["ImageField"][]; }; /** * Convert Image Mode @@ -2483,28 +2372,19 @@ export type components = { * @default img_conv * @enum {string} */ - type: 'img_conv'; + type: "img_conv"; /** * Image * @description The image to convert */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Mode * @description The mode to convert to * @default L * @enum {string} */ - mode?: - | 'L' - | 'RGB' - | 'RGBA' - | 'CMYK' - | 'YCbCr' - | 'LAB' - | 'HSV' - | 'I' - | 'F'; + mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; }; /** * Crop Image @@ -2527,12 +2407,12 @@ export type components = { * @default img_crop * @enum {string} */ - type: 'img_crop'; + type: "img_crop"; /** * Image * @description The image to crop */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * X * @description The left x coordinate of the crop rectangle @@ -2579,9 +2459,9 @@ export type components = { */ 
thumbnail_url: string; /** @description The type of the image. */ - image_origin: components['schemas']['ResourceOrigin']; + image_origin: components["schemas"]["ResourceOrigin"]; /** @description The category of the image. */ - image_category: components['schemas']['ImageCategory']; + image_category: components["schemas"]["ImageCategory"]; /** * Width * @description The width of the image in px. @@ -2665,12 +2545,12 @@ export type components = { * @default img_hue_adjust * @enum {string} */ - type: 'img_hue_adjust'; + type: "img_hue_adjust"; /** * Image * @description The image to adjust */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Hue * @description The degrees by which to rotate the hue, 0-360 @@ -2699,12 +2579,12 @@ export type components = { * @default img_ilerp * @enum {string} */ - type: 'img_ilerp'; + type: "img_ilerp"; /** * Image * @description The image to lerp */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Min * @description The minimum input value @@ -2739,12 +2619,12 @@ export type components = { * @default image * @enum {string} */ - type: 'image'; + type: "image"; /** * Image * @description The image to load */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * Lerp Image @@ -2767,12 +2647,12 @@ export type components = { * @default img_lerp * @enum {string} */ - type: 'img_lerp'; + type: "img_lerp"; /** * Image * @description The image to lerp */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Min * @description The minimum output value @@ -2807,12 +2687,12 @@ export type components = { * @default img_luminosity_adjust * @enum {string} */ - type: 'img_luminosity_adjust'; + type: "img_luminosity_adjust"; /** * Image * @description The image to adjust */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Luminosity * @description The factor by which to adjust the luminosity (value) @@ -2857,17 +2737,17 @@ export type components = { * @default img_mul * @enum {string} */ - type: 'img_mul'; + type: "img_mul"; /** * Image1 * @description The first image to multiply */ - image1?: components['schemas']['ImageField']; + image1?: components["schemas"]["ImageField"]; /** * Image2 * @description The second image to multiply */ - image2?: components['schemas']['ImageField']; + image2?: components["schemas"]["ImageField"]; }; /** * Blur NSFW Image @@ -2890,17 +2770,17 @@ export type components = { * @default img_nsfw * @enum {string} */ - type: 'img_nsfw'; + type: "img_nsfw"; /** * Metadata * @description Optional core metadata to be written to image */ - metadata?: components['schemas']['CoreMetadata']; + metadata?: components["schemas"]["CoreMetadata"]; /** * Image * @description The image to check */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * ImageOutput @@ -2912,12 +2792,12 @@ export type components = { * @default image_output * @enum {string} */ - type: 'image_output'; + type: "image_output"; /** * Image * @description The output image */ - image: components['schemas']['ImageField']; + image: components["schemas"]["ImageField"]; /** * Width * @description The width of the image in pixels @@ -2950,22 +2830,22 @@ export type components = { * @default img_paste * @enum {string} */ - type: 'img_paste'; + type: "img_paste"; /** * Base Image * @description The base 
image */ - base_image?: components['schemas']['ImageField']; + base_image?: components["schemas"]["ImageField"]; /** * Image * @description The image to paste */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Mask * @description The mask to use when pasting */ - mask?: components['schemas']['ImageField']; + mask?: components["schemas"]["ImageField"]; /** * X * @description The left x coordinate at which to paste the image @@ -3000,12 +2880,12 @@ export type components = { * @default image_processor * @enum {string} */ - type: 'image_processor'; + type: "image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * ImageRecordChanges @@ -3019,7 +2899,7 @@ export type components = { */ ImageRecordChanges: { /** @description The image's new category. */ - image_category?: components['schemas']['ImageCategory']; + image_category?: components["schemas"]["ImageCategory"]; /** * Session Id * @description The image's new session ID. @@ -3057,12 +2937,12 @@ export type components = { * @default img_resize * @enum {string} */ - type: 'img_resize'; + type: "img_resize"; /** * Image * @description The image to resize */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Width * @description The width to resize to (px) @@ -3081,13 +2961,7 @@ export type components = { * @default bicubic * @enum {string} */ - resample_mode?: - | 'nearest' - | 'box' - | 'bilinear' - | 'hamming' - | 'bicubic' - | 'lanczos'; + resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; }; /** * Image Saturation Adjustment @@ -3110,12 +2984,12 @@ export type components = { * @default img_saturation_adjust * @enum {string} */ - type: 'img_saturation_adjust'; + type: "img_saturation_adjust"; /** * Image * @description The image to adjust */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Saturation * @description The factor by which to adjust the saturation @@ -3144,12 +3018,12 @@ export type components = { * @default img_scale * @enum {string} */ - type: 'img_scale'; + type: "img_scale"; /** * Image * @description The image to scale */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Scale Factor * @description The factor by which to scale the image @@ -3162,13 +3036,7 @@ export type components = { * @default bicubic * @enum {string} */ - resample_mode?: - | 'nearest' - | 'box' - | 'bilinear' - | 'hamming' - | 'bicubic' - | 'lanczos'; + resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; }; /** * Image to Latents @@ -3191,17 +3059,17 @@ export type components = { * @default i2l * @enum {string} */ - type: 'i2l'; + type: "i2l"; /** * Image * @description The image to encode */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Vae * @description VAE */ - vae?: components['schemas']['VaeField']; + vae?: components["schemas"]["VaeField"]; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -3257,12 +3125,12 @@ export type components = { * @default img_watermark * @enum {string} */ - type: 'img_watermark'; + type: "img_watermark"; /** * Image * @description The image to check */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * 
Text * @description Watermark text @@ -3273,7 +3141,7 @@ export type components = { * Metadata * @description Optional core metadata to be written to image */ - metadata?: components['schemas']['CoreMetadata']; + metadata?: components["schemas"]["CoreMetadata"]; }; /** ImagesUpdatedFromListResult */ ImagesUpdatedFromListResult: { @@ -3304,12 +3172,12 @@ export type components = { * @default infill_rgba * @enum {string} */ - type: 'infill_rgba'; + type: "infill_rgba"; /** * Image * @description The image to infill */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Color * @description The color to use to infill @@ -3320,7 +3188,7 @@ export type components = { * "a": 255 * } */ - color?: components['schemas']['ColorField']; + color?: components["schemas"]["ColorField"]; }; /** * PatchMatch Infill @@ -3343,12 +3211,12 @@ export type components = { * @default infill_patchmatch * @enum {string} */ - type: 'infill_patchmatch'; + type: "infill_patchmatch"; /** * Image * @description The image to infill */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * Tile Infill @@ -3371,12 +3239,12 @@ export type components = { * @default infill_tile * @enum {string} */ - type: 'infill_tile'; + type: "infill_tile"; /** * Image * @description The image to infill */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Tile Size * @description The tile size (px) @@ -3400,10 +3268,27 @@ export type components = { */ mask_name: string; /** - * Masked Latens Name + * Masked Latents Name * @description The name of the masked image latents */ - masked_latens_name?: string; + masked_latents_name?: string; + }; + /** + * InpaintMaskOutput + * @description Base class for nodes that output a single image + */ + InpaintMaskOutput: { + /** + * Type + * @default inpaint_mask_output + * @enum {string} + */ + type: "inpaint_mask_output"; + /** + * Inpaint Mask + * @description Mask for inpaint model run + */ + inpaint_mask: components["schemas"]["InpaintMaskField"]; }; /** * Integer Primitive Collection @@ -3426,7 +3311,7 @@ export type components = { * @default integer_collection * @enum {string} */ - type: 'integer_collection'; + type: "integer_collection"; /** * Collection * @description The collection of integer values @@ -3444,7 +3329,7 @@ export type components = { * @default integer_collection_output * @enum {string} */ - type: 'integer_collection_output'; + type: "integer_collection_output"; /** * Collection * @description The int collection @@ -3472,7 +3357,7 @@ export type components = { * @default integer * @enum {string} */ - type: 'integer'; + type: "integer"; /** * Value * @description The integer value @@ -3490,7 +3375,7 @@ export type components = { * @default integer_output * @enum {string} */ - type: 'integer_output'; + type: "integer_output"; /** * Value * @description The output integer @@ -3518,7 +3403,7 @@ export type components = { * @default iterate * @enum {string} */ - type: 'iterate'; + type: "iterate"; /** * Collection * @description The list of items to iterate over @@ -3541,7 +3426,7 @@ export type components = { * @default iterate_output * @enum {string} */ - type: 'iterate_output'; + type: "iterate_output"; /** * Collection Item * @description The item being iterated over @@ -3569,12 +3454,12 @@ export type components = { * @default infill_lama * @enum {string} */ - type: 'infill_lama'; + type: "infill_lama"; /** * Image * @description The 
image to infill */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * Latents Primitive Collection @@ -3597,12 +3482,12 @@ export type components = { * @default latents_collection * @enum {string} */ - type: 'latents_collection'; + type: "latents_collection"; /** * Collection * @description The collection of latents tensors */ - collection?: components['schemas']['LatentsField'][]; + collection?: components["schemas"]["LatentsField"][]; }; /** * LatentsCollectionOutput @@ -3614,12 +3499,12 @@ export type components = { * @default latents_collection_output * @enum {string} */ - type: 'latents_collection_output'; + type: "latents_collection_output"; /** * Collection * @description Latents tensor */ - collection: components['schemas']['LatentsField'][]; + collection: components["schemas"]["LatentsField"][]; }; /** * LatentsField @@ -3658,12 +3543,12 @@ export type components = { * @default latents * @enum {string} */ - type: 'latents'; + type: "latents"; /** * Latents * @description The latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; }; /** * LatentsOutput @@ -3675,12 +3560,12 @@ export type components = { * @default latents_output * @enum {string} */ - type: 'latents_output'; + type: "latents_output"; /** * Latents * @description Latents tensor */ - latents: components['schemas']['LatentsField']; + latents: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -3713,7 +3598,7 @@ export type components = { * @default l2i * @enum {string} */ - type: 'l2i'; + type: "l2i"; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -3730,17 +3615,17 @@ export type components = { * Metadata * @description Optional core metadata to be written to image */ - metadata?: components['schemas']['CoreMetadata']; + metadata?: components["schemas"]["CoreMetadata"]; /** * Latents * @description Latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; /** * Vae * @description VAE */ - vae?: components['schemas']['VaeField']; + vae?: components["schemas"]["VaeField"]; }; /** * Leres (Depth) Processor @@ -3763,12 +3648,12 @@ export type components = { * @default leres_image_processor * @enum {string} */ - type: 'leres_image_processor'; + type: "leres_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Thr A * @description Leres parameter `thr_a` @@ -3821,12 +3706,12 @@ export type components = { * @default lineart_anime_image_processor * @enum {string} */ - type: 'lineart_anime_image_processor'; + type: "lineart_anime_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -3861,12 +3746,12 @@ export type components = { * @default lineart_image_processor * @enum {string} */ - type: 'lineart_image_processor'; + type: "lineart_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -3895,7 +3780,7 @@ export type components = { * Lora * @description The LoRA model */ - lora: 
components['schemas']['LoRAModelField']; + lora: components["schemas"]["LoRAModelField"]; /** * Weight * @description The weight of the LoRA model @@ -3906,18 +3791,18 @@ export type components = { LoRAModelConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'lora'; + model_type: "lora"; /** Path */ path: string; /** Description */ description?: string; - model_format: components['schemas']['LoRAModelFormat']; - error?: components['schemas']['ModelError']; + model_format: components["schemas"]["LoRAModelFormat"]; + error?: components["schemas"]["ModelError"]; }; /** * LoRAModelField @@ -3930,14 +3815,14 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; }; /** * LoRAModelFormat * @description An enumeration. * @enum {string} */ - LoRAModelFormat: 'lycoris' | 'diffusers'; + LoRAModelFormat: "lycoris" | "diffusers"; /** * LogLevel * @description An enumeration. @@ -3952,11 +3837,11 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description Info to load submodel */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components['schemas']['SubModelType']; + submodel?: components["schemas"]["SubModelType"]; /** * Weight * @description Lora's weight which to use when apply to model @@ -3984,12 +3869,12 @@ export type components = { * @default lora_loader * @enum {string} */ - type: 'lora_loader'; + type: "lora_loader"; /** * LoRA * @description LoRA model to load */ - lora: components['schemas']['LoRAModelField']; + lora: components["schemas"]["LoRAModelField"]; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -4000,12 +3885,12 @@ export type components = { * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; }; /** * LoraLoaderOutput @@ -4017,17 +3902,17 @@ export type components = { * @default lora_loader_output * @enum {string} */ - type: 'lora_loader_output'; + type: "lora_loader_output"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; }; /** * MainModelField @@ -4040,9 +3925,9 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description Model Type */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; }; /** * Main Model @@ -4065,12 +3950,12 @@ export type components = { * @default main_model_loader * @enum {string} */ - type: 'main_model_loader'; + type: "main_model_loader"; /** * Model * 
@description Main model (UNet, VAE, CLIP) to load */ - model: components['schemas']['MainModelField']; + model: components["schemas"]["MainModelField"]; }; /** * Combine Mask @@ -4093,17 +3978,17 @@ export type components = { * @default mask_combine * @enum {string} */ - type: 'mask_combine'; + type: "mask_combine"; /** * Mask1 * @description The first mask to combine */ - mask1?: components['schemas']['ImageField']; + mask1?: components["schemas"]["ImageField"]; /** * Mask2 * @description The second image to combine */ - mask2?: components['schemas']['ImageField']; + mask2?: components["schemas"]["ImageField"]; }; /** * Mask Edge @@ -4126,12 +4011,12 @@ export type components = { * @default mask_edge * @enum {string} */ - type: 'mask_edge'; + type: "mask_edge"; /** * Image * @description The image to apply the mask to */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Edge Size * @description The size of the edge @@ -4174,12 +4059,12 @@ export type components = { * @default tomask * @enum {string} */ - type: 'tomask'; + type: "tomask"; /** * Image * @description The image to create the mask from */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Invert * @description Whether or not to invert the mask @@ -4208,12 +4093,12 @@ export type components = { * @default mediapipe_face_processor * @enum {string} */ - type: 'mediapipe_face_processor'; + type: "mediapipe_face_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Max Faces * @description Maximum number of faces to detect @@ -4232,11 +4117,7 @@ export type components = { * @description An enumeration. 
* @enum {string} */ - MergeInterpolationMethod: - | 'weighted_sum' - | 'sigmoid' - | 'inv_sigmoid' - | 'add_difference'; + MergeInterpolationMethod: "weighted_sum" | "sigmoid" | "inv_sigmoid" | "add_difference"; /** * Metadata Accumulator * @description Outputs a Core Metadata Object @@ -4258,7 +4139,7 @@ export type components = { * @default metadata_accumulator * @enum {string} */ - type: 'metadata_accumulator'; + type: "metadata_accumulator"; /** * Generation Mode * @description The generation mode that output this image @@ -4318,17 +4199,17 @@ export type components = { * Model * @description The main model used for inference */ - model?: components['schemas']['MainModelField']; + model?: components["schemas"]["MainModelField"]; /** * Controlnets * @description The ControlNets used for inference */ - controlnets?: components['schemas']['ControlField'][]; + controlnets?: components["schemas"]["ControlField"][]; /** * Loras * @description The LoRAs used for inference */ - loras?: components['schemas']['LoRAMetadataField'][]; + loras?: components["schemas"]["LoRAMetadataField"][]; /** * Strength * @description The strength used for latents-to-latents @@ -4343,7 +4224,7 @@ export type components = { * Vae * @description The VAE used for decoding, if the main model's default was not used */ - vae?: components['schemas']['VAEModelField']; + vae?: components["schemas"]["VAEModelField"]; /** * Positive Style Prompt * @description The positive style prompt parameter @@ -4358,7 +4239,7 @@ export type components = { * Refiner Model * @description The SDXL Refiner model used */ - refiner_model?: components['schemas']['MainModelField']; + refiner_model?: components["schemas"]["MainModelField"]; /** * Refiner Cfg Scale * @description The classifier-free guidance scale parameter used for the refiner @@ -4400,12 +4281,12 @@ export type components = { * @default metadata_accumulator_output * @enum {string} */ - type: 'metadata_accumulator_output'; + type: "metadata_accumulator_output"; /** * Metadata * @description The core metadata for the image */ - metadata: components['schemas']['CoreMetadata']; + metadata: components["schemas"]["CoreMetadata"]; }; /** * Midas (Depth) Processor @@ -4428,12 +4309,12 @@ export type components = { * @default midas_depth_image_processor * @enum {string} */ - type: 'midas_depth_image_processor'; + type: "midas_depth_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * A Mult * @description Midas parameter `a_mult` (a = a_mult * PI) @@ -4468,12 +4349,12 @@ export type components = { * @default mlsd_image_processor * @enum {string} */ - type: 'mlsd_image_processor'; + type: "mlsd_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -4504,7 +4385,7 @@ export type components = { * @description An enumeration. 
* @enum {string} */ - ModelError: 'not_found'; + ModelError: "not_found"; /** ModelInfo */ ModelInfo: { /** @@ -4513,11 +4394,11 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description Info to load submodel */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; /** @description Info to load submodel */ - submodel?: components['schemas']['SubModelType']; + submodel?: components["schemas"]["SubModelType"]; }; /** * ModelLoaderOutput @@ -4529,53 +4410,39 @@ export type components = { * @default model_loader_output * @enum {string} */ - type: 'model_loader_output'; + type: "model_loader_output"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet: components['schemas']['UNetField']; + unet: components["schemas"]["UNetField"]; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip: components['schemas']['ClipField']; + clip: components["schemas"]["ClipField"]; /** * VAE * @description VAE */ - vae: components['schemas']['VaeField']; + vae: components["schemas"]["VaeField"]; }; /** * ModelType * @description An enumeration. * @enum {string} */ - ModelType: 'onnx' | 'main' | 'vae' | 'lora' | 'controlnet' | 'embedding'; + ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding"; /** * ModelVariantType * @description An enumeration. * @enum {string} */ - ModelVariantType: 'normal' | 'inpaint' | 'depth'; + ModelVariantType: "normal" | "inpaint" | "depth"; /** ModelsList */ ModelsList: { /** Models */ - models: ( - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig'] - )[]; + models: (components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[]; }; /** * Multiply Integers @@ -4598,7 +4465,7 @@ export type components = { * @default mul * @enum {string} */ - type: 'mul'; + type: "mul"; /** * A * @description The first number @@ -4633,7 +4500,7 @@ export type components = { * @default noise * @enum {string} */ - type: 'noise'; + 
type: "noise"; /** * Seed * @description Seed for random number generation @@ -4668,12 +4535,12 @@ export type components = { * @default noise_output * @enum {string} */ - type: 'noise_output'; + type: "noise_output"; /** * Noise * @description Noise tensor */ - noise?: components['schemas']['LatentsField']; + noise?: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -4706,12 +4573,12 @@ export type components = { * @default normalbae_image_processor * @enum {string} */ - type: 'normalbae_image_processor'; + type: "normalbae_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -4746,22 +4613,22 @@ export type components = { * @default l2i_onnx * @enum {string} */ - type: 'l2i_onnx'; + type: "l2i_onnx"; /** * Latents * @description Denoised latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; /** * Vae * @description VAE */ - vae?: components['schemas']['VaeField']; + vae?: components["schemas"]["VaeField"]; /** * Metadata * @description Optional core metadata to be written to image */ - metadata?: components['schemas']['CoreMetadata']; + metadata?: components["schemas"]["CoreMetadata"]; }; /** * ONNXModelLoaderOutput @@ -4773,27 +4640,27 @@ export type components = { * @default model_loader_output_onnx * @enum {string} */ - type: 'model_loader_output_onnx'; + type: "model_loader_output_onnx"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * CLIP * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; /** * VAE Decoder * @description VAE */ - vae_decoder?: components['schemas']['VaeField']; + vae_decoder?: components["schemas"]["VaeField"]; /** * VAE Encoder * @description VAE */ - vae_encoder?: components['schemas']['VaeField']; + vae_encoder?: components["schemas"]["VaeField"]; }; /** * ONNX Prompt (Raw) @@ -4817,7 +4684,7 @@ export type components = { * @default prompt_onnx * @enum {string} */ - type: 'prompt_onnx'; + type: "prompt_onnx"; /** * Prompt * @description Raw prompt text (no parsing) @@ -4828,18 +4695,18 @@ export type components = { * Clip * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; }; /** ONNXStableDiffusion1ModelConfig */ ONNXStableDiffusion1ModelConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'onnx'; + model_type: "onnx"; /** Path */ path: string; /** Description */ @@ -4848,20 +4715,20 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'onnx'; - error?: components['schemas']['ModelError']; - variant: components['schemas']['ModelVariantType']; + model_format: "onnx"; + error?: components["schemas"]["ModelError"]; + variant: components["schemas"]["ModelVariantType"]; }; /** ONNXStableDiffusion2ModelConfig */ ONNXStableDiffusion2ModelConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: 
components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'onnx'; + model_type: "onnx"; /** Path */ path: string; /** Description */ @@ -4870,10 +4737,10 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'onnx'; - error?: components['schemas']['ModelError']; - variant: components['schemas']['ModelVariantType']; - prediction_type: components['schemas']['SchedulerPredictionType']; + model_format: "onnx"; + error?: components["schemas"]["ModelError"]; + variant: components["schemas"]["ModelVariantType"]; + prediction_type: components["schemas"]["SchedulerPredictionType"]; /** Upcast Attention */ upcast_attention: boolean; }; @@ -4898,22 +4765,22 @@ export type components = { * @default t2l_onnx * @enum {string} */ - type: 't2l_onnx'; + type: "t2l_onnx"; /** * Positive Conditioning * @description Positive conditioning tensor */ - positive_conditioning?: components['schemas']['ConditioningField']; + positive_conditioning?: components["schemas"]["ConditioningField"]; /** * Negative Conditioning * @description Negative conditioning tensor */ - negative_conditioning?: components['schemas']['ConditioningField']; + negative_conditioning?: components["schemas"]["ConditioningField"]; /** * Noise * @description Noise tensor */ - noise?: components['schemas']['LatentsField']; + noise?: components["schemas"]["LatentsField"]; /** * Steps * @description Number of steps to run @@ -4932,60 +4799,24 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: - | 'ddim' - | 'ddpm' - | 'deis' - | 'lms' - | 'lms_k' - | 'pndm' - | 'heun' - | 'heun_k' - | 'euler' - | 'euler_k' - | 'euler_a' - | 'kdpm_2' - | 'kdpm_2_a' - | 'dpmpp_2s' - | 'dpmpp_2s_k' - | 'dpmpp_2m' - | 'dpmpp_2m_k' - | 'dpmpp_2m_sde' - | 'dpmpp_2m_sde_k' - | 'dpmpp_sde' - | 'dpmpp_sde_k' - | 'unipc'; + scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; /** * Precision * @description Precision to use * @default tensor(float16) * @enum {string} */ - precision?: - | 'tensor(bool)' - | 'tensor(int8)' - | 'tensor(uint8)' - | 'tensor(int16)' - | 'tensor(uint16)' - | 'tensor(int32)' - | 'tensor(uint32)' - | 'tensor(int64)' - | 'tensor(uint64)' - | 'tensor(float16)' - | 'tensor(float)' - | 'tensor(double)'; + precision?: "tensor(bool)" | "tensor(int8)" | "tensor(uint8)" | "tensor(int16)" | "tensor(uint16)" | "tensor(int32)" | "tensor(uint32)" | "tensor(int64)" | "tensor(uint64)" | "tensor(float16)" | "tensor(float)" | "tensor(double)"; /** * Unet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * Control * @description ControlNet(s) to apply */ - control?: - | components['schemas']['ControlField'] - | components['schemas']['ControlField'][]; + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][]; }; /** * OffsetPaginatedResults[BoardDTO] @@ -4996,7 +4827,7 @@ export type components = { * Items * @description Items */ - items: components['schemas']['BoardDTO'][]; + items: components["schemas"]["BoardDTO"][]; /** * Offset * @description Offset from which to retrieve items @@ -5022,7 +4853,7 @@ export type components = { * Items * @description Items */ - items: components['schemas']['ImageDTO'][]; + items: 
components["schemas"]["ImageDTO"][]; /** * Offset * @description Offset from which to retrieve items @@ -5050,9 +4881,9 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description Model Type */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; }; /** * ONNX Main Model @@ -5075,12 +4906,12 @@ export type components = { * @default onnx_model_loader * @enum {string} */ - type: 'onnx_model_loader'; + type: "onnx_model_loader"; /** * Model * @description ONNX Main model (UNet, VAE, CLIP) to load */ - model: components['schemas']['OnnxModelField']; + model: components["schemas"]["OnnxModelField"]; }; /** * Openpose Processor @@ -5103,12 +4934,12 @@ export type components = { * @default openpose_image_processor * @enum {string} */ - type: 'openpose_image_processor'; + type: "openpose_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Hand And Face * @description Whether to use hands and face mode @@ -5137,7 +4968,7 @@ export type components = { * Items * @description Items */ - items: components['schemas']['GraphExecutionState'][]; + items: components["schemas"]["GraphExecutionState"][]; /** * Page * @description Current Page @@ -5180,12 +5011,12 @@ export type components = { * @default pidi_image_processor * @enum {string} */ - type: 'pidi_image_processor'; + type: "pidi_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Detect Resolution * @description Pixel resolution for detection @@ -5232,7 +5063,7 @@ export type components = { * @default prompt_from_file * @enum {string} */ - type: 'prompt_from_file'; + type: "prompt_from_file"; /** * File Path * @description Path to prompt text file @@ -5282,7 +5113,7 @@ export type components = { * @default rand_int * @enum {string} */ - type: 'rand_int'; + type: "rand_int"; /** * Low * @description The inclusive low value @@ -5317,7 +5148,7 @@ export type components = { * @default random_range * @enum {string} */ - type: 'random_range'; + type: "random_range"; /** * Low * @description The inclusive low value @@ -5363,7 +5194,7 @@ export type components = { * @default range * @enum {string} */ - type: 'range'; + type: "range"; /** * Start * @description The start of the range @@ -5404,7 +5235,7 @@ export type components = { * @default range_of_size * @enum {string} */ - type: 'range_of_size'; + type: "range_of_size"; /** * Start * @description The start of the range @@ -5453,12 +5284,12 @@ export type components = { * @default lresize * @enum {string} */ - type: 'lresize'; + type: "lresize"; /** * Latents * @description Latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; /** * Width * @description Width of output (px) @@ -5475,14 +5306,7 @@ export type components = { * @default bilinear * @enum {string} */ - mode?: - | 'nearest' - | 'linear' - | 'bilinear' - | 'bicubic' - | 'trilinear' - | 'area' - | 'nearest-exact'; + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; /** * Antialias * @description Whether or not to apply antialiasing (bilinear or bicubic only) @@ -5499,7 +5323,7 @@ export type components = { * This 
may be a user-initiated upload, or an internal application upload (eg Canvas init image). * @enum {string} */ - ResourceOrigin: 'internal' | 'external'; + ResourceOrigin: "internal" | "external"; /** * SDXL Compel Prompt * @description Parse prompt using compel package to conditioning. @@ -5521,7 +5345,7 @@ export type components = { * @default sdxl_compel_prompt * @enum {string} */ - type: 'sdxl_compel_prompt'; + type: "sdxl_compel_prompt"; /** * Prompt * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -5568,12 +5392,12 @@ export type components = { * Clip * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; /** * Clip2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components['schemas']['ClipField']; + clip2?: components["schemas"]["ClipField"]; }; /** * SDXL LoRA @@ -5596,12 +5420,12 @@ export type components = { * @default sdxl_lora_loader * @enum {string} */ - type: 'sdxl_lora_loader'; + type: "sdxl_lora_loader"; /** * LoRA * @description LoRA model to load */ - lora: components['schemas']['LoRAModelField']; + lora: components["schemas"]["LoRAModelField"]; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -5612,17 +5436,17 @@ export type components = { * UNET * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components['schemas']['ClipField']; + clip2?: components["schemas"]["ClipField"]; }; /** * SDXLLoraLoaderOutput @@ -5634,22 +5458,22 @@ export type components = { * @default sdxl_lora_loader_output * @enum {string} */ - type: 'sdxl_lora_loader_output'; + type: "sdxl_lora_loader_output"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet?: components['schemas']['UNetField']; + unet?: components["schemas"]["UNetField"]; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip?: components['schemas']['ClipField']; + clip?: components["schemas"]["ClipField"]; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components['schemas']['ClipField']; + clip2?: components["schemas"]["ClipField"]; }; /** * SDXL Main Model @@ -5672,12 +5496,12 @@ export type components = { * @default sdxl_model_loader * @enum {string} */ - type: 'sdxl_model_loader'; + type: "sdxl_model_loader"; /** * Model * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load */ - model: components['schemas']['MainModelField']; + model: components["schemas"]["MainModelField"]; }; /** * SDXLModelLoaderOutput @@ -5689,27 +5513,27 @@ export type components = { * @default sdxl_model_loader_output * @enum {string} */ - type: 'sdxl_model_loader_output'; + type: "sdxl_model_loader_output"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet: components['schemas']['UNetField']; + unet: components["schemas"]["UNetField"]; /** * CLIP 1 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip: components['schemas']['ClipField']; + clip: components["schemas"]["ClipField"]; /** * CLIP 2 * @description CLIP 
(tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2: components['schemas']['ClipField']; + clip2: components["schemas"]["ClipField"]; /** * VAE * @description VAE */ - vae: components['schemas']['VaeField']; + vae: components["schemas"]["VaeField"]; }; /** * SDXL Refiner Compel Prompt @@ -5732,7 +5556,7 @@ export type components = { * @default sdxl_refiner_compel_prompt * @enum {string} */ - type: 'sdxl_refiner_compel_prompt'; + type: "sdxl_refiner_compel_prompt"; /** * Style * @description Prompt to be parsed by Compel to create a conditioning tensor @@ -5769,7 +5593,7 @@ export type components = { * Clip2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2?: components['schemas']['ClipField']; + clip2?: components["schemas"]["ClipField"]; }; /** * SDXL Refiner Model @@ -5792,12 +5616,12 @@ export type components = { * @default sdxl_refiner_model_loader * @enum {string} */ - type: 'sdxl_refiner_model_loader'; + type: "sdxl_refiner_model_loader"; /** * Model * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load */ - model: components['schemas']['MainModelField']; + model: components["schemas"]["MainModelField"]; }; /** * SDXLRefinerModelLoaderOutput @@ -5809,22 +5633,22 @@ export type components = { * @default sdxl_refiner_model_loader_output * @enum {string} */ - type: 'sdxl_refiner_model_loader_output'; + type: "sdxl_refiner_model_loader_output"; /** * UNet * @description UNet (scheduler, LoRAs) */ - unet: components['schemas']['UNetField']; + unet: components["schemas"]["UNetField"]; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - clip2: components['schemas']['ClipField']; + clip2: components["schemas"]["ClipField"]; /** * VAE * @description VAE */ - vae: components['schemas']['VaeField']; + vae: components["schemas"]["VaeField"]; }; /** * Scale Latents @@ -5847,12 +5671,12 @@ export type components = { * @default lscale * @enum {string} */ - type: 'lscale'; + type: "lscale"; /** * Latents * @description Latents tensor */ - latents?: components['schemas']['LatentsField']; + latents?: components["schemas"]["LatentsField"]; /** * Scale Factor * @description The factor by which to scale @@ -5864,14 +5688,7 @@ export type components = { * @default bilinear * @enum {string} */ - mode?: - | 'nearest' - | 'linear' - | 'bilinear' - | 'bicubic' - | 'trilinear' - | 'area' - | 'nearest-exact'; + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; /** * Antialias * @description Whether or not to apply antialiasing (bilinear or bicubic only) @@ -5884,7 +5701,7 @@ export type components = { * @description An enumeration. 
* @enum {string} */ - SchedulerPredictionType: 'epsilon' | 'v_prediction' | 'sample'; + SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; /** * Segment Anything Processor * @description Applies segment anything processing to image @@ -5906,12 +5723,12 @@ export type components = { * @default segment_anything_processor * @enum {string} */ - type: 'segment_anything_processor'; + type: "segment_anything_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * Show Image @@ -5934,23 +5751,23 @@ export type components = { * @default show_image * @enum {string} */ - type: 'show_image'; + type: "show_image"; /** * Image * @description The image to show */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** StableDiffusion1ModelCheckpointConfig */ StableDiffusion1ModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -5959,24 +5776,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'checkpoint'; - error?: components['schemas']['ModelError']; + model_format: "checkpoint"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; /** Config */ config: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion1ModelDiffusersConfig */ StableDiffusion1ModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -5985,22 +5802,22 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'diffusers'; - error?: components['schemas']['ModelError']; + model_format: "diffusers"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion2ModelCheckpointConfig */ StableDiffusion2ModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -6009,24 +5826,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'checkpoint'; - error?: components['schemas']['ModelError']; + model_format: "checkpoint"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; /** Config */ config: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusion2ModelDiffusersConfig */ StableDiffusion2ModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -6035,22 +5852,22 @@ export type components = { * Model 
Format * @enum {string} */ - model_format: 'diffusers'; - error?: components['schemas']['ModelError']; + model_format: "diffusers"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusionXLModelCheckpointConfig */ StableDiffusionXLModelCheckpointConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -6059,24 +5876,24 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'checkpoint'; - error?: components['schemas']['ModelError']; + model_format: "checkpoint"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; /** Config */ config: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** StableDiffusionXLModelDiffusersConfig */ StableDiffusionXLModelDiffusersConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'main'; + model_type: "main"; /** Path */ path: string; /** Description */ @@ -6085,11 +5902,11 @@ export type components = { * Model Format * @enum {string} */ - model_format: 'diffusers'; - error?: components['schemas']['ModelError']; + model_format: "diffusers"; + error?: components["schemas"]["ModelError"]; /** Vae */ vae?: string; - variant: components['schemas']['ModelVariantType']; + variant: components["schemas"]["ModelVariantType"]; }; /** * Step Param Easing @@ -6112,45 +5929,14 @@ export type components = { * @default step_param_easing * @enum {string} */ - type: 'step_param_easing'; + type: "step_param_easing"; /** * Easing * @description The easing function to use * @default Linear * @enum {string} */ - easing?: - | 'Linear' - | 'QuadIn' - | 'QuadOut' - | 'QuadInOut' - | 'CubicIn' - | 'CubicOut' - | 'CubicInOut' - | 'QuarticIn' - | 'QuarticOut' - | 'QuarticInOut' - | 'QuinticIn' - | 'QuinticOut' - | 'QuinticInOut' - | 'SineIn' - | 'SineOut' - | 'SineInOut' - | 'CircularIn' - | 'CircularOut' - | 'CircularInOut' - | 'ExponentialIn' - | 'ExponentialOut' - | 'ExponentialInOut' - | 'ElasticIn' - | 'ElasticOut' - | 'ElasticInOut' - | 'BackIn' - | 'BackOut' - | 'BackInOut' - | 'BounceIn' - | 'BounceOut' - | 'BounceInOut'; + easing?: "Linear" | "QuadIn" | "QuadOut" | "QuadInOut" | "CubicIn" | "CubicOut" | "CubicInOut" | "QuarticIn" | "QuarticOut" | "QuarticInOut" | "QuinticIn" | "QuinticOut" | "QuinticInOut" | "SineIn" | "SineOut" | "SineInOut" | "CircularIn" | "CircularOut" | "CircularInOut" | "ExponentialIn" | "ExponentialOut" | "ExponentialInOut" | "ElasticIn" | "ElasticOut" | "ElasticInOut" | "BackIn" | "BackOut" | "BackInOut" | "BounceIn" | "BounceOut" | "BounceInOut"; /** * Num Steps * @description number of denoising steps @@ -6225,7 +6011,7 @@ export type components = { * @default string_collection * @enum {string} */ - type: 'string_collection'; + type: "string_collection"; /** * Collection * @description The collection of string values @@ -6242,7 +6028,7 @@ export type components = { * @default string_collection_output * @enum {string} */ - type: 'string_collection_output'; + type: "string_collection_output"; /** * Collection * 
@description The output strings @@ -6270,7 +6056,7 @@ export type components = { * @default string * @enum {string} */ - type: 'string'; + type: "string"; /** * Value * @description The string value @@ -6288,7 +6074,7 @@ export type components = { * @default string_output * @enum {string} */ - type: 'string_output'; + type: "string_output"; /** * Value * @description The output string @@ -6300,17 +6086,7 @@ export type components = { * @description An enumeration. * @enum {string} */ - SubModelType: - | 'unet' - | 'text_encoder' - | 'text_encoder_2' - | 'tokenizer' - | 'tokenizer_2' - | 'vae' - | 'vae_decoder' - | 'vae_encoder' - | 'scheduler' - | 'safety_checker'; + SubModelType: "unet" | "text_encoder" | "text_encoder_2" | "tokenizer" | "tokenizer_2" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; /** * Subtract Integers * @description Subtracts two numbers @@ -6332,7 +6108,7 @@ export type components = { * @default sub * @enum {string} */ - type: 'sub'; + type: "sub"; /** * A * @description The first number @@ -6350,19 +6126,19 @@ export type components = { TextualInversionModelConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'embedding'; + model_type: "embedding"; /** Path */ path: string; /** Description */ description?: string; /** Model Format */ model_format: null; - error?: components['schemas']['ModelError']; + error?: components["schemas"]["ModelError"]; }; /** * Tile Resample Processor @@ -6385,12 +6161,12 @@ export type components = { * @default tile_image_processor * @enum {string} */ - type: 'tile_image_processor'; + type: "tile_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; /** * Down Sampling Rate * @description Down sampling rate @@ -6404,17 +6180,17 @@ export type components = { * Unet * @description Info to load unet submodel */ - unet: components['schemas']['ModelInfo']; + unet: components["schemas"]["ModelInfo"]; /** * Scheduler * @description Info to load scheduler submodel */ - scheduler: components['schemas']['ModelInfo']; + scheduler: components["schemas"]["ModelInfo"]; /** * Loras * @description Loras to apply on model loading */ - loras: components['schemas']['LoraInfo'][]; + loras: components["schemas"]["LoraInfo"][]; }; /** Upscaler */ Upscaler: { @@ -6440,7 +6216,7 @@ export type components = { */ model_name: string; /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; }; /** VaeField */ VaeField: { @@ -6448,7 +6224,7 @@ export type components = { * Vae * @description Info to load vae submodel */ - vae: components['schemas']['ModelInfo']; + vae: components["schemas"]["ModelInfo"]; }; /** * VAE @@ -6471,12 +6247,12 @@ export type components = { * @default vae_loader * @enum {string} */ - type: 'vae_loader'; + type: "vae_loader"; /** * VAE * @description VAE model to load */ - vae_model: components['schemas']['VAEModelField']; + vae_model: components["schemas"]["VAEModelField"]; }; /** * VaeLoaderOutput @@ -6488,36 +6264,36 @@ export type components = { * @default vae_loader_output * @enum {string} */ - type: 'vae_loader_output'; + type: "vae_loader_output"; /** * VAE * @description VAE */ - vae: components['schemas']['VaeField']; + vae: components["schemas"]["VaeField"]; }; /** 
VaeModelConfig */ VaeModelConfig: { /** Model Name */ model_name: string; - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** * Model Type * @enum {string} */ - model_type: 'vae'; + model_type: "vae"; /** Path */ path: string; /** Description */ description?: string; - model_format: components['schemas']['VaeModelFormat']; - error?: components['schemas']['ModelError']; + model_format: components["schemas"]["VaeModelFormat"]; + error?: components["schemas"]["ModelError"]; }; /** * VaeModelFormat * @description An enumeration. * @enum {string} */ - VaeModelFormat: 'checkpoint' | 'diffusers'; + VaeModelFormat: "checkpoint" | "diffusers"; /** ValidationError */ ValidationError: { /** Location */ @@ -6548,12 +6324,12 @@ export type components = { * @default zoe_depth_image_processor * @enum {string} */ - type: 'zoe_depth_image_processor'; + type: "zoe_depth_image_processor"; /** * Image * @description The image to process */ - image?: components['schemas']['ImageField']; + image?: components["schemas"]["ImageField"]; }; /** * UIConfigBase @@ -6581,53 +6357,20 @@ export type components = { * - `Input.Any`: The field may have its value provided either directly or by a connection. * @enum {string} */ - Input: 'connection' | 'direct' | 'any'; + Input: "connection" | "direct" | "any"; /** * UIType * @description Type hints for the UI. * If a field should be provided a data type that does not exactly match the python type of the field, use this to provide the type that should be used instead. See the node development docs for detail on adding a new field type, which involves client-side changes. * @enum {string} */ - UIType: - | 'integer' - | 'float' - | 'boolean' - | 'string' - | 'array' - | 'ImageField' - | 'LatentsField' - | 'ConditioningField' - | 'ControlField' - | 'ColorField' - | 'ImageCollection' - | 'ConditioningCollection' - | 'ColorCollection' - | 'LatentsCollection' - | 'IntegerCollection' - | 'FloatCollection' - | 'StringCollection' - | 'BooleanCollection' - | 'MainModelField' - | 'SDXLMainModelField' - | 'SDXLRefinerModelField' - | 'ONNXModelField' - | 'VaeModelField' - | 'LoRAModelField' - | 'ControlNetModelField' - | 'UNetField' - | 'VaeField' - | 'ClipField' - | 'Collection' - | 'CollectionItem' - | 'FilePath' - | 'enum' - | 'Scheduler'; + UIType: "integer" | "float" | "boolean" | "string" | "array" | "ImageField" | "LatentsField" | "ConditioningField" | "ControlField" | "ColorField" | "ImageCollection" | "ConditioningCollection" | "ColorCollection" | "LatentsCollection" | "IntegerCollection" | "FloatCollection" | "StringCollection" | "BooleanCollection" | "MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VaeModelField" | "LoRAModelField" | "ControlNetModelField" | "UNetField" | "VaeField" | "ClipField" | "Collection" | "CollectionItem" | "FilePath" | "enum" | "Scheduler"; /** * UIComponent * @description The type of UI component to use for a field, used to override the default components, which are inferred from the field type. * @enum {string} */ - UIComponent: 'none' | 'textarea' | 'slider'; + UIComponent: "none" | "textarea" | "slider"; /** * _InputField * @description *DO NOT USE* @@ -6636,11 +6379,11 @@ export type components = { * purpose in the backend. 
*/ _InputField: { - input: components['schemas']['Input']; + input: components["schemas"]["Input"]; /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components['schemas']['UIType']; - ui_component?: components['schemas']['UIComponent']; + ui_type?: components["schemas"]["UIType"]; + ui_component?: components["schemas"]["UIComponent"]; /** Ui Order */ ui_order?: number; }; @@ -6654,40 +6397,40 @@ export type components = { _OutputField: { /** Ui Hidden */ ui_hidden: boolean; - ui_type?: components['schemas']['UIType']; + ui_type?: components["schemas"]["UIType"]; /** Ui Order */ ui_order?: number; }; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: 'checkpoint' | 'diffusers'; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: 'checkpoint' | 'diffusers'; /** * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: 'olive' | 'onnx'; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: 'checkpoint' | 'diffusers'; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion2ModelFormat: 'checkpoint' | 'diffusers'; + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -6701,6 +6444,7 @@ export type $defs = Record; export type external = Record; export type operations = { + /** * List Sessions * @description Gets a list of sessions, optionally searching @@ -6720,13 +6464,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['PaginatedResults_GraphExecutionState_']; + "application/json": components["schemas"]["PaginatedResults_GraphExecutionState_"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -6738,14 +6482,14 @@ export type operations = { create_session: { requestBody?: { content: { - 'application/json': components['schemas']['Graph']; + "application/json": components["schemas"]["Graph"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Invalid json */ @@ -6755,7 +6499,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -6775,7 +6519,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Session not found */ @@ -6785,7 +6529,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -6803,110 +6547,14 @@ export type operations = { }; requestBody: { content: { - 'application/json': - | components['schemas']['BooleanInvocation'] - | components['schemas']['BooleanCollectionInvocation'] - | components['schemas']['IntegerInvocation'] - | components['schemas']['IntegerCollectionInvocation'] - | components['schemas']['FloatInvocation'] - | components['schemas']['FloatCollectionInvocation'] - | components['schemas']['StringInvocation'] - | components['schemas']['StringCollectionInvocation'] - | components['schemas']['ImageInvocation'] - | components['schemas']['ImageCollectionInvocation'] - | components['schemas']['LatentsInvocation'] - | components['schemas']['LatentsCollectionInvocation'] - | components['schemas']['ColorInvocation'] - | components['schemas']['ConditioningInvocation'] - | components['schemas']['ConditioningCollectionInvocation'] - | components['schemas']['ControlNetInvocation'] - | components['schemas']['ImageProcessorInvocation'] - | components['schemas']['MainModelLoaderInvocation'] - | components['schemas']['LoraLoaderInvocation'] - | components['schemas']['SDXLLoraLoaderInvocation'] - | components['schemas']['VaeLoaderInvocation'] - | components['schemas']['MetadataAccumulatorInvocation'] - | components['schemas']['RangeInvocation'] - | components['schemas']['RangeOfSizeInvocation'] - | components['schemas']['RandomRangeInvocation'] - | components['schemas']['CompelInvocation'] - | components['schemas']['SDXLCompelPromptInvocation'] - | 
components['schemas']['SDXLRefinerCompelPromptInvocation'] - | components['schemas']['ClipSkipInvocation'] - | components['schemas']['CvInpaintInvocation'] - | components['schemas']['ShowImageInvocation'] - | components['schemas']['BlankImageInvocation'] - | components['schemas']['ImageCropInvocation'] - | components['schemas']['ImagePasteInvocation'] - | components['schemas']['MaskFromAlphaInvocation'] - | components['schemas']['ImageMultiplyInvocation'] - | components['schemas']['ImageChannelInvocation'] - | components['schemas']['ImageConvertInvocation'] - | components['schemas']['ImageBlurInvocation'] - | components['schemas']['ImageResizeInvocation'] - | components['schemas']['ImageScaleInvocation'] - | components['schemas']['ImageLerpInvocation'] - | components['schemas']['ImageInverseLerpInvocation'] - | components['schemas']['ImageNSFWBlurInvocation'] - | components['schemas']['ImageWatermarkInvocation'] - | components['schemas']['MaskEdgeInvocation'] - | components['schemas']['MaskCombineInvocation'] - | components['schemas']['ColorCorrectInvocation'] - | components['schemas']['ImageHueAdjustmentInvocation'] - | components['schemas']['ImageLuminosityAdjustmentInvocation'] - | components['schemas']['ImageSaturationAdjustmentInvocation'] - | components['schemas']['InfillColorInvocation'] - | components['schemas']['InfillTileInvocation'] - | components['schemas']['InfillPatchMatchInvocation'] - | components['schemas']['LaMaInfillInvocation'] - | components['schemas']['DenoiseLatentsInvocation'] - | components['schemas']['LatentsToImageInvocation'] - | components['schemas']['ResizeLatentsInvocation'] - | components['schemas']['ScaleLatentsInvocation'] - | components['schemas']['ImageToLatentsInvocation'] - | components['schemas']['BlendLatentsInvocation'] - | components['schemas']['AddInvocation'] - | components['schemas']['SubtractInvocation'] - | components['schemas']['MultiplyInvocation'] - | components['schemas']['DivideInvocation'] - | components['schemas']['RandomIntInvocation'] - | components['schemas']['NoiseInvocation'] - | components['schemas']['ONNXPromptInvocation'] - | components['schemas']['ONNXTextToLatentsInvocation'] - | components['schemas']['ONNXLatentsToImageInvocation'] - | components['schemas']['OnnxModelLoaderInvocation'] - | components['schemas']['FloatLinearRangeInvocation'] - | components['schemas']['StepParamEasingInvocation'] - | components['schemas']['DynamicPromptInvocation'] - | components['schemas']['PromptsFromFileInvocation'] - | components['schemas']['SDXLModelLoaderInvocation'] - | components['schemas']['SDXLRefinerModelLoaderInvocation'] - | components['schemas']['ESRGANInvocation'] - | components['schemas']['GraphInvocation'] - | components['schemas']['IterateInvocation'] - | components['schemas']['CollectInvocation'] - | components['schemas']['CannyImageProcessorInvocation'] - | components['schemas']['HedImageProcessorInvocation'] - | components['schemas']['LineartImageProcessorInvocation'] - | components['schemas']['LineartAnimeImageProcessorInvocation'] - | components['schemas']['OpenposeImageProcessorInvocation'] - | components['schemas']['MidasDepthImageProcessorInvocation'] - | components['schemas']['NormalbaeImageProcessorInvocation'] - | components['schemas']['MlsdImageProcessorInvocation'] - | components['schemas']['PidiImageProcessorInvocation'] - | components['schemas']['ContentShuffleImageProcessorInvocation'] - | components['schemas']['ZoeDepthImageProcessorInvocation'] - | components['schemas']['MediapipeFaceProcessorInvocation'] - | 
components['schemas']['LeresImageProcessorInvocation'] - | components['schemas']['TileResamplerProcessorInvocation'] - | components['schemas']['SegmentAnythingProcessorInvocation']; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': string; + "application/json": string; }; }; /** @description Invalid node or link */ @@ -6920,7 +6568,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -6940,110 +6588,14 @@ export type operations = { }; requestBody: { content: { - 'application/json': - | components['schemas']['BooleanInvocation'] - | components['schemas']['BooleanCollectionInvocation'] - | components['schemas']['IntegerInvocation'] - | components['schemas']['IntegerCollectionInvocation'] - | components['schemas']['FloatInvocation'] - | components['schemas']['FloatCollectionInvocation'] - | components['schemas']['StringInvocation'] - | components['schemas']['StringCollectionInvocation'] - | components['schemas']['ImageInvocation'] - | components['schemas']['ImageCollectionInvocation'] - | components['schemas']['LatentsInvocation'] - | components['schemas']['LatentsCollectionInvocation'] - | components['schemas']['ColorInvocation'] - | components['schemas']['ConditioningInvocation'] - | components['schemas']['ConditioningCollectionInvocation'] - | components['schemas']['ControlNetInvocation'] - | components['schemas']['ImageProcessorInvocation'] - | components['schemas']['MainModelLoaderInvocation'] - | components['schemas']['LoraLoaderInvocation'] - | components['schemas']['SDXLLoraLoaderInvocation'] - | components['schemas']['VaeLoaderInvocation'] - | components['schemas']['MetadataAccumulatorInvocation'] - | components['schemas']['RangeInvocation'] - | components['schemas']['RangeOfSizeInvocation'] - | components['schemas']['RandomRangeInvocation'] - | components['schemas']['CompelInvocation'] - | 
components['schemas']['SDXLCompelPromptInvocation'] - | components['schemas']['SDXLRefinerCompelPromptInvocation'] - | components['schemas']['ClipSkipInvocation'] - | components['schemas']['CvInpaintInvocation'] - | components['schemas']['ShowImageInvocation'] - | components['schemas']['BlankImageInvocation'] - | components['schemas']['ImageCropInvocation'] - | components['schemas']['ImagePasteInvocation'] - | components['schemas']['MaskFromAlphaInvocation'] - | components['schemas']['ImageMultiplyInvocation'] - | components['schemas']['ImageChannelInvocation'] - | components['schemas']['ImageConvertInvocation'] - | components['schemas']['ImageBlurInvocation'] - | components['schemas']['ImageResizeInvocation'] - | components['schemas']['ImageScaleInvocation'] - | components['schemas']['ImageLerpInvocation'] - | components['schemas']['ImageInverseLerpInvocation'] - | components['schemas']['ImageNSFWBlurInvocation'] - | components['schemas']['ImageWatermarkInvocation'] - | components['schemas']['MaskEdgeInvocation'] - | components['schemas']['MaskCombineInvocation'] - | components['schemas']['ColorCorrectInvocation'] - | components['schemas']['ImageHueAdjustmentInvocation'] - | components['schemas']['ImageLuminosityAdjustmentInvocation'] - | components['schemas']['ImageSaturationAdjustmentInvocation'] - | components['schemas']['InfillColorInvocation'] - | components['schemas']['InfillTileInvocation'] - | components['schemas']['InfillPatchMatchInvocation'] - | components['schemas']['LaMaInfillInvocation'] - | components['schemas']['DenoiseLatentsInvocation'] - | components['schemas']['LatentsToImageInvocation'] - | components['schemas']['ResizeLatentsInvocation'] - | components['schemas']['ScaleLatentsInvocation'] - | components['schemas']['ImageToLatentsInvocation'] - | components['schemas']['BlendLatentsInvocation'] - | components['schemas']['AddInvocation'] - | components['schemas']['SubtractInvocation'] - | components['schemas']['MultiplyInvocation'] - | components['schemas']['DivideInvocation'] - | components['schemas']['RandomIntInvocation'] - | components['schemas']['NoiseInvocation'] - | components['schemas']['ONNXPromptInvocation'] - | components['schemas']['ONNXTextToLatentsInvocation'] - | components['schemas']['ONNXLatentsToImageInvocation'] - | components['schemas']['OnnxModelLoaderInvocation'] - | components['schemas']['FloatLinearRangeInvocation'] - | components['schemas']['StepParamEasingInvocation'] - | components['schemas']['DynamicPromptInvocation'] - | components['schemas']['PromptsFromFileInvocation'] - | components['schemas']['SDXLModelLoaderInvocation'] - | components['schemas']['SDXLRefinerModelLoaderInvocation'] - | components['schemas']['ESRGANInvocation'] - | components['schemas']['GraphInvocation'] - | components['schemas']['IterateInvocation'] - | components['schemas']['CollectInvocation'] - | components['schemas']['CannyImageProcessorInvocation'] - | components['schemas']['HedImageProcessorInvocation'] - | components['schemas']['LineartImageProcessorInvocation'] - | components['schemas']['LineartAnimeImageProcessorInvocation'] - | components['schemas']['OpenposeImageProcessorInvocation'] - | components['schemas']['MidasDepthImageProcessorInvocation'] - | components['schemas']['NormalbaeImageProcessorInvocation'] - | components['schemas']['MlsdImageProcessorInvocation'] - | components['schemas']['PidiImageProcessorInvocation'] - | components['schemas']['ContentShuffleImageProcessorInvocation'] - | components['schemas']['ZoeDepthImageProcessorInvocation'] - | 
components['schemas']['MediapipeFaceProcessorInvocation'] - | components['schemas']['LeresImageProcessorInvocation'] - | components['schemas']['TileResamplerProcessorInvocation'] - | components['schemas']['SegmentAnythingProcessorInvocation']; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | 
components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Invalid node or link */ @@ -7057,7 +6609,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7079,7 +6631,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Invalid node or link */ @@ -7093,7 +6645,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7111,14 +6663,14 @@ export type operations = { }; requestBody: { content: { - 'application/json': components['schemas']['Edge']; + "application/json": components["schemas"]["Edge"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Invalid node or link */ @@ -7132,7 +6684,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7160,7 +6712,7 @@ export type operations = { /** @description 
Successful Response */ 200: { content: { - 'application/json': components['schemas']['GraphExecutionState']; + "application/json": components["schemas"]["GraphExecutionState"]; }; }; /** @description Invalid node or link */ @@ -7174,7 +6726,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7198,7 +6750,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': unknown; + "application/json": unknown; }; }; /** @description The invocation is queued */ @@ -7216,7 +6768,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7236,7 +6788,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': unknown; + "application/json": unknown; }; }; /** @description The invocation is canceled */ @@ -7246,7 +6798,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7259,22 +6811,22 @@ export type operations = { parameters: { query?: { /** @description Base models to include */ - base_models?: components['schemas']['BaseModelType'][]; + base_models?: components["schemas"]["BaseModelType"][]; /** @description The type of model to get */ - model_type?: components['schemas']['ModelType']; + model_type?: components["schemas"]["ModelType"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ModelsList']; + "application/json": components["schemas"]["ModelsList"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7287,9 +6839,9 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description The type of model */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; /** @description model name */ model_name: string; }; @@ -7306,7 +6858,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7319,49 +6871,23 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description The type of model */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; /** @description model name */ model_name: string; }; }; requestBody: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | 
components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; responses: { /** @description The model was updated successfully */ 200: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; /** @description Bad request */ @@ -7379,7 +6905,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7391,27 +6917,14 @@ export type operations = { import_model: { requestBody: { content: { - 'application/json': components['schemas']['Body_import_model']; + "application/json": components["schemas"]["Body_import_model"]; }; }; responses: { 
/** @description The model imported successfully */ 201: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; /** @description The model could not be found */ @@ -7429,7 +6942,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; /** @description The model appeared to import successfully, but could not be found in the model manager */ @@ -7445,40 +6958,14 @@ export type operations = { add_model: { requestBody: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | 
components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; responses: { /** @description The model added successfully */ 201: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; /** @description The model could not be found */ @@ -7492,7 +6979,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; /** @description The model appeared to add successfully, but could not be found in the model manager */ @@ -7513,9 +7000,9 @@ export type operations = { }; path: { /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; /** @description The type of model */ - model_type: components['schemas']['ModelType']; + model_type: components["schemas"]["ModelType"]; /** @description model name */ model_name: string; }; @@ -7524,20 +7011,7 @@ export type operations = { /** @description Model converted successfully */ 200: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | 
components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; /** @description Bad request */ @@ -7551,7 +7025,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7568,7 +7042,7 @@ export type operations = { /** @description Directory searched successfully */ 200: { content: { - 'application/json': string[]; + "application/json": string[]; }; }; /** @description Invalid directory path */ @@ -7578,7 +7052,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7592,7 +7066,7 @@ export type operations = { /** @description paths retrieved successfully */ 200: { content: { - 'application/json': string[]; + "application/json": string[]; }; }; }; @@ -7607,7 +7081,7 @@ export type operations = { /** @description synchronization successful */ 201: { content: { - 'application/json': boolean; + "application/json": boolean; }; }; }; @@ -7620,32 +7094,19 @@ export type operations = { parameters: { path: { /** @description Base model */ - base_model: components['schemas']['BaseModelType']; + base_model: components["schemas"]["BaseModelType"]; }; }; requestBody: { content: { - 'application/json': components['schemas']['Body_merge_models']; + "application/json": components["schemas"]["Body_merge_models"]; }; }; responses: { /** @description Model converted successfully */ 200: { content: { - 'application/json': - | components['schemas']['ONNXStableDiffusion1ModelConfig'] - | components['schemas']['StableDiffusion1ModelCheckpointConfig'] - | components['schemas']['StableDiffusion1ModelDiffusersConfig'] - | components['schemas']['VaeModelConfig'] - | components['schemas']['LoRAModelConfig'] - | components['schemas']['ControlNetModelCheckpointConfig'] - | components['schemas']['ControlNetModelDiffusersConfig'] - | components['schemas']['TextualInversionModelConfig'] - | components['schemas']['ONNXStableDiffusion2ModelConfig'] - | components['schemas']['StableDiffusion2ModelCheckpointConfig'] - | components['schemas']['StableDiffusion2ModelDiffusersConfig'] - | components['schemas']['StableDiffusionXLModelCheckpointConfig'] - | components['schemas']['StableDiffusionXLModelDiffusersConfig']; + "application/json": components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | 
components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"]; }; }; /** @description Incompatible models */ @@ -7659,7 +7120,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7672,7 +7133,7 @@ export type operations = { parameters: { query: { /** @description The category of the image */ - image_category: components['schemas']['ImageCategory']; + image_category: components["schemas"]["ImageCategory"]; /** @description Whether this is an intermediate image */ is_intermediate: boolean; /** @description The board to add this image to, if any */ @@ -7685,14 +7146,14 @@ export type operations = { }; requestBody: { content: { - 'multipart/form-data': components['schemas']['Body_upload_image']; + "multipart/form-data": components["schemas"]["Body_upload_image"]; }; }; responses: { /** @description The image was uploaded successfully */ 201: { content: { - 'application/json': components['schemas']['ImageDTO']; + "application/json": components["schemas"]["ImageDTO"]; }; }; /** @description Image upload failed */ @@ -7702,7 +7163,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7722,13 +7183,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImageDTO']; + "application/json": components["schemas"]["ImageDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7748,13 +7209,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': unknown; + "application/json": unknown; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7772,20 +7233,20 @@ export type operations = { }; requestBody: { content: { - 'application/json': components['schemas']['ImageRecordChanges']; + "application/json": components["schemas"]["ImageRecordChanges"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImageDTO']; + "application/json": components["schemas"]["ImageDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7799,7 +7260,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': unknown; + "application/json": unknown; 
}; }; }; @@ -7819,13 +7280,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImageMetadata']; + "application/json": components["schemas"]["ImageMetadata"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7845,7 +7306,7 @@ export type operations = { /** @description Return the full-resolution image */ 200: { content: { - 'image/png': unknown; + "image/png": unknown; }; }; /** @description Image not found */ @@ -7855,7 +7316,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7875,7 +7336,7 @@ export type operations = { /** @description Return the image thumbnail */ 200: { content: { - 'image/webp': unknown; + "image/webp": unknown; }; }; /** @description Image not found */ @@ -7885,7 +7346,7 @@ export type operations = { /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7905,13 +7366,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImageUrlsDTO']; + "application/json": components["schemas"]["ImageUrlsDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7924,9 +7385,9 @@ export type operations = { parameters: { query?: { /** @description The origin of images to list. */ - image_origin?: components['schemas']['ResourceOrigin']; + image_origin?: components["schemas"]["ResourceOrigin"]; /** @description The categories of image to include. */ - categories?: components['schemas']['ImageCategory'][]; + categories?: components["schemas"]["ImageCategory"][]; /** @description Whether to list intermediate images. */ is_intermediate?: boolean; /** @description The board id to filter by. Use 'none' to find images without a board. 
*/ @@ -7941,13 +7402,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['OffsetPaginatedResults_ImageDTO_']; + "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7956,20 +7417,20 @@ export type operations = { delete_images_from_list: { requestBody: { content: { - 'application/json': components['schemas']['Body_delete_images_from_list']; + "application/json": components["schemas"]["Body_delete_images_from_list"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['DeleteImagesFromListResult']; + "application/json": components["schemas"]["DeleteImagesFromListResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -7978,20 +7439,20 @@ export type operations = { star_images_in_list: { requestBody: { content: { - 'application/json': components['schemas']['Body_star_images_in_list']; + "application/json": components["schemas"]["Body_star_images_in_list"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImagesUpdatedFromListResult']; + "application/json": components["schemas"]["ImagesUpdatedFromListResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8000,20 +7461,20 @@ export type operations = { unstar_images_in_list: { requestBody: { content: { - 'application/json': components['schemas']['Body_unstar_images_in_list']; + "application/json": components["schemas"]["Body_unstar_images_in_list"]; }; }; responses: { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['ImagesUpdatedFromListResult']; + "application/json": components["schemas"]["ImagesUpdatedFromListResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8037,15 +7498,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': - | components['schemas']['OffsetPaginatedResults_BoardDTO_'] - | components['schemas']['BoardDTO'][]; + "application/json": components["schemas"]["OffsetPaginatedResults_BoardDTO_"] | components["schemas"]["BoardDTO"][]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8065,13 +7524,13 @@ export type operations = { /** @description The board was created successfully */ 201: { content: { - 'application/json': components['schemas']['BoardDTO']; + "application/json": components["schemas"]["BoardDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8091,13 
+7550,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['BoardDTO']; + "application/json": components["schemas"]["BoardDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8121,13 +7580,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['DeleteBoardResult']; + "application/json": components["schemas"]["DeleteBoardResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8145,20 +7604,20 @@ export type operations = { }; requestBody: { content: { - 'application/json': components['schemas']['BoardChanges']; + "application/json": components["schemas"]["BoardChanges"]; }; }; responses: { /** @description The board was updated successfully */ 201: { content: { - 'application/json': components['schemas']['BoardDTO']; + "application/json": components["schemas"]["BoardDTO"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8178,13 +7637,13 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': string[]; + "application/json": string[]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8196,20 +7655,20 @@ export type operations = { add_image_to_board: { requestBody: { content: { - 'application/json': components['schemas']['Body_add_image_to_board']; + "application/json": components["schemas"]["Body_add_image_to_board"]; }; }; responses: { /** @description The image was added to a board successfully */ 201: { content: { - 'application/json': unknown; + "application/json": unknown; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8221,20 +7680,20 @@ export type operations = { remove_image_from_board: { requestBody: { content: { - 'application/json': components['schemas']['Body_remove_image_from_board']; + "application/json": components["schemas"]["Body_remove_image_from_board"]; }; }; responses: { /** @description The image was removed from the board successfully */ 201: { content: { - 'application/json': unknown; + "application/json": unknown; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8246,20 +7705,20 @@ export type operations = { add_images_to_board: { requestBody: { content: { - 'application/json': components['schemas']['Body_add_images_to_board']; + "application/json": components["schemas"]["Body_add_images_to_board"]; }; }; responses: { /** @description Images were added to board successfully */ 201: { content: { - 'application/json': components['schemas']['AddImagesToBoardResult']; + "application/json": 
components["schemas"]["AddImagesToBoardResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8271,20 +7730,20 @@ export type operations = { remove_images_from_board: { requestBody: { content: { - 'application/json': components['schemas']['Body_remove_images_from_board']; + "application/json": components["schemas"]["Body_remove_images_from_board"]; }; }; responses: { /** @description Images were removed from board successfully */ 201: { content: { - 'application/json': components['schemas']['RemoveImagesFromBoardResult']; + "application/json": components["schemas"]["RemoveImagesFromBoardResult"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; @@ -8295,7 +7754,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['AppVersion']; + "application/json": components["schemas"]["AppVersion"]; }; }; }; @@ -8306,7 +7765,7 @@ export type operations = { /** @description Successful Response */ 200: { content: { - 'application/json': components['schemas']['AppConfig']; + "application/json": components["schemas"]["AppConfig"]; }; }; }; @@ -8320,7 +7779,7 @@ export type operations = { /** @description The operation was successful */ 200: { content: { - 'application/json': components['schemas']['LogLevel']; + "application/json": components["schemas"]["LogLevel"]; }; }; }; @@ -8332,20 +7791,20 @@ export type operations = { set_log_level: { requestBody: { content: { - 'application/json': components['schemas']['LogLevel']; + "application/json": components["schemas"]["LogLevel"]; }; }; responses: { /** @description The operation was successful */ 200: { content: { - 'application/json': components['schemas']['LogLevel']; + "application/json": components["schemas"]["LogLevel"]; }; }; /** @description Validation Error */ 422: { content: { - 'application/json': components['schemas']['HTTPValidationError']; + "application/json": components["schemas"]["HTTPValidationError"]; }; }; }; From 226721ce517d50a29d22e59ddf843bcccfff28cb Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 03:50:29 +1200 Subject: [PATCH 10/20] feat: Setup UnifiedCanvas to work with new InpaintMaskField --- .../nodes/util/graphBuilders/addVAEToGraph.ts | 11 +++++ .../graphBuilders/buildCanvasInpaintGraph.ts | 37 +++++++++++++++- .../graphBuilders/buildCanvasOutpaintGraph.ts | 42 ++++++++++++++++++- .../buildCanvasSDXLInpaintGraph.ts | 35 +++++++++++++++- .../buildCanvasSDXLOutpaintGraph.ts | 40 +++++++++++++++++- .../nodes/util/graphBuilders/constants.ts | 1 + .../frontend/web/src/services/api/types.ts | 1 + 7 files changed, 161 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 360e07062a..a0b6bc9803 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -9,6 +9,7 @@ import { CANVAS_TEXT_TO_IMAGE_GRAPH, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, LATENTS_TO_IMAGE, 
MAIN_MODEL_LOADER, @@ -117,6 +118,16 @@ export const addVAEToGraph = ( field: 'vae', }, }, + { + source: { + node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'vae', + }, + }, { source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 4b017340cb..231e1b7a13 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + CreateInpaintMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -15,13 +16,14 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_INPAINT_GRAPH, - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_INPAINT_GRAPH, + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -127,6 +129,12 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, }, + [INPAINT_CREATE_MASK]: { + type: 'create_inpaint_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [NOISE]: { type: 'noise', id: NOISE, @@ -276,11 +284,22 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'mask', + }, + }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'inpaint_mask', + }, destination: { node_id: DENOISE_LATENTS, field: 'mask', @@ -459,6 +478,16 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -516,6 +545,10 @@ export const buildCanvasInpaintGraph = ( ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), image: canvasMaskImage, }; + graph.nodes[INPAINT_CREATE_MASK] = { + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateInpaintMaskInvocation), + image: canvasInitImage, + }; graph.edges.push( // Color Correct The Inpainted Result diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 9f424d3dcf..f123f88103 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -17,13 +17,14 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPAINT_GRAPH, - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPAINT_GRAPH, + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -153,6 +154,12 @@ export const buildCanvasOutpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_inpaint_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [DENOISE_LATENTS]: { type: 'denoise_latents', id: DENOISE_LATENTS, @@ -317,11 +324,22 @@ export const buildCanvasOutpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'mask', + }, + }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'inpaint_mask', + }, destination: { node_id: DENOISE_LATENTS, field: 'mask', @@ -522,6 +540,16 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Take combined mask and resize and then blur { source: { @@ -640,6 +668,16 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index f60c710c64..b768f90628 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + CreateInpaintMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -16,10 +17,11 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPUT, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -136,6 +138,12 @@ export const buildCanvasSDXLInpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_inpaint_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', id: SDXL_DENOISE_LATENTS, @@ -290,11 +298,22 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'mask', + }, + }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'inpaint_mask', + }, destination: { node_id: SDXL_DENOISE_LATENTS, field: 'mask', @@ -473,6 +492,16 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -530,6 +559,10 @@ export const buildCanvasSDXLInpaintGraph = ( ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), image: canvasMaskImage, }; + graph.nodes[INPAINT_CREATE_MASK] = { + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateInpaintMaskInvocation), + image: canvasInitImage, + }; graph.edges.push( // Color Correct The Inpainted Result diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 7e98c37233..7911e827a7 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -18,10 +18,11 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPUT, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -156,6 +157,12 @@ export const buildCanvasSDXLOutpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_inpaint_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', id: SDXL_DENOISE_LATENTS, @@ -331,11 +338,22 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'mask', + }, + }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'inpaint_mask', + }, destination: { node_id: SDXL_DENOISE_LATENTS, field: 'mask', @@ -537,6 +555,16 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Take combined mask and resize and then blur { source: { @@ -655,6 +683,16 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 6547d4a092..09f6a1c2d1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -25,6 +25,7 @@ export const INPAINT_IMAGE_RESIZE_DOWN = 'inpaint_image_resize_down'; export const INPAINT_INFILL = 'inpaint_infill'; export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down'; export const INPAINT_FINAL_IMAGE = 'inpaint_final_image'; +export const INPAINT_CREATE_MASK = 'inpaint_create_mask'; export const CANVAS_COHERENCE_DENOISE_LATENTS = 'canvas_coherence_denoise_latents'; export const CANVAS_COHERENCE_NOISE = 'canvas_coherence_noise'; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 4e30794a51..031130ed65 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -111,6 +111,7 @@ export type ImageBlurInvocation = s['ImageBlurInvocation']; export type ImageScaleInvocation = s['ImageScaleInvocation']; export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation']; export type InfillTileInvocation = s['InfillTileInvocation']; +export type CreateInpaintMaskInvocation = s['CreateInpaintMaskInvocation']; export type RandomIntInvocation = s['RandomIntInvocation']; export type CompelInvocation = s['CompelInvocation']; export type DynamicPromptInvocation = s['DynamicPromptInvocation']; From c923d094c6f06d74a994f1b13391acfb8548a22c Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 05:50:13 +1200 Subject: [PATCH 11/20] rename: Inpaint Mask to Denoise Mask --- invokeai/app/invocations/latent.py | 32 ++--- invokeai/app/invocations/primitives.py | 10 +- .../Invocation/fields/InputFieldRenderer.tsx | 8 +- .../fields/inputs/DenoiseMaskInputField.tsx | 17 +++ .../fields/inputs/InpaintMaskInputField.tsx | 17 --- .../web/src/features/nodes/types/constants.ts | 6 +- .../web/src/features/nodes/types/types.ts | 24 ++-- .../nodes/util/fieldTemplateBuilders.ts | 14 +-- .../features/nodes/util/fieldValueBuilders.ts | 2 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 10 +- .../graphBuilders/buildCanvasOutpaintGraph.ts | 6 +- .../buildCanvasSDXLInpaintGraph.ts | 10 +- 
.../buildCanvasSDXLOutpaintGraph.ts | 6 +- .../frontend/web/src/services/api/schema.d.ts | 110 +++++++++--------- .../frontend/web/src/services/api/types.ts | 2 +- 15 files changed, 137 insertions(+), 137 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/DenoiseMaskInputField.tsx delete mode 100644 invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index af9147f834..14aa6a56ba 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -21,10 +21,10 @@ from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ( + DenoiseMaskField, + DenoiseMaskOutput, ImageField, ImageOutput, - InpaintMaskField, - InpaintMaskOutput, LatentsField, LatentsOutput, build_latents_output, @@ -57,16 +57,16 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device()) SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))] -@title("Create Inpaint Mask") -@tags("mask", "inpaint") -class CreateInpaintMaskInvocation(BaseInvocation): - """Creates mask for inpaint model run.""" +@title("Create Denoise Mask") +@tags("mask", "denoise") +class CreateDenoiseMaskInvocation(BaseInvocation): + """Creates mask for denoising model run.""" # Metadata - type: Literal["create_inpaint_mask"] = "create_inpaint_mask" + type: Literal["create_denoise_mask"] = "create_denoise_mask" # Inputs - image: Optional[ImageField] = InputField(default=None, description="Image which will be inpainted") + image: Optional[ImageField] = InputField(default=None, description="Image which will be masked") mask: ImageField = InputField(description="The mask to use when pasting") vae: VaeField = InputField( description=FieldDescriptions.vae, @@ -86,7 +86,7 @@ class CreateInpaintMaskInvocation(BaseInvocation): return mask_tensor @torch.no_grad() - def invoke(self, context: InvocationContext) -> InpaintMaskOutput: + def invoke(self, context: InvocationContext) -> DenoiseMaskOutput: if self.image is not None: image = context.services.images.get_pil_image(self.image.image_name) image = image_resized_to_grid_as_tensor(image.convert("RGB")) @@ -118,8 +118,8 @@ class CreateInpaintMaskInvocation(BaseInvocation): mask_name = f"{context.graph_execution_state_id}__{self.id}_mask" context.services.latents.save(mask_name, mask) - return InpaintMaskOutput( - inpaint_mask=InpaintMaskField( + return DenoiseMaskOutput( + denoise_mask=DenoiseMaskField( mask_name=mask_name, masked_latents_name=masked_latents_name, ), @@ -189,7 +189,7 @@ class DenoiseLatentsInvocation(BaseInvocation): default=None, description=FieldDescriptions.control, input=Input.Connection, ui_order=5 ) latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) - mask: Optional[InpaintMaskField] = InputField( + denoise_mask: Optional[DenoiseMaskField] = InputField( default=None, description=FieldDescriptions.mask, ) @@ -403,13 +403,13 @@ class DenoiseLatentsInvocation(BaseInvocation): return num_inference_steps, timesteps, init_timestep def prep_inpaint_mask(self, context, latents): - if self.mask is None: + if self.denoise_mask is None: return None, None - mask = context.services.latents.get(self.mask.mask_name) + mask = context.services.latents.get(self.denoise_mask.mask_name) mask = 
tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR) - if self.mask.masked_latents_name is not None: - masked_latents = context.services.latents.get(self.mask.masked_latents_name) + if self.denoise_mask.masked_latents_name is not None: + masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name) else: masked_latents = None diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 47e9250edb..49cd49d189 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -296,21 +296,21 @@ class ImageCollectionInvocation(BaseInvocation): # endregion -# region InpaintMask +# region DenoiseMask -class InpaintMaskField(BaseModel): +class DenoiseMaskField(BaseModel): """An inpaint mask field""" mask_name: str = Field(description="The name of the mask image") masked_latents_name: Optional[str] = Field(description="The name of the masked image latents") -class InpaintMaskOutput(BaseInvocationOutput): +class DenoiseMaskOutput(BaseInvocationOutput): """Base class for nodes that output a single image""" - type: Literal["inpaint_mask_output"] = "inpaint_mask_output" - inpaint_mask: InpaintMaskField = OutputField(description="Mask for inpaint model run") + type: Literal["denoise_mask_output"] = "denoise_mask_output" + denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run") # endregion diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx index 176e8a5905..bb9637cd73 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx @@ -10,10 +10,10 @@ import ColorInputField from './inputs/ColorInputField'; import ConditioningInputField from './inputs/ConditioningInputField'; import ControlInputField from './inputs/ControlInputField'; import ControlNetModelInputField from './inputs/ControlNetModelInputField'; +import DenoiseMaskInputField from './inputs/DenoiseMaskInputField'; import EnumInputField from './inputs/EnumInputField'; import ImageCollectionInputField from './inputs/ImageCollectionInputField'; import ImageInputField from './inputs/ImageInputField'; -import InpaintMaskInputField from './inputs/InpaintMaskInputField'; import LatentsInputField from './inputs/LatentsInputField'; import LoRAModelInputField from './inputs/LoRAModelInputField'; import MainModelInputField from './inputs/MainModelInputField'; @@ -107,11 +107,11 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { } if ( - field?.type === 'InpaintMaskField' && - fieldTemplate?.type === 'InpaintMaskField' + field?.type === 'DenoiseMaskField' && + fieldTemplate?.type === 'DenoiseMaskField' ) { return ( - +) => { + return null; +}; + +export default memo(DenoiseMaskInputFieldComponent); diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx deleted file mode 100644 index 248d5922af..0000000000 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/InpaintMaskInputField.tsx +++ /dev/null @@ -1,17 +0,0 @@ -import { - 
FieldComponentProps, - InpaintMaskInputFieldTemplate, - InpaintMaskInputFieldValue, -} from 'features/nodes/types/types'; -import { memo } from 'react'; - -const InpaintMaskInputFieldComponent = ( - _props: FieldComponentProps< - InpaintMaskInputFieldValue, - InpaintMaskInputFieldTemplate - > -) => { - return null; -}; - -export default memo(InpaintMaskInputFieldComponent); diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 850e270d09..520a65524f 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -59,9 +59,9 @@ export const FIELDS: Record = { description: 'Images may be passed between nodes.', color: 'purple.500', }, - InpaintMaskField: { - title: 'Inpaint Mask', - description: 'Inpaint Mask may be passed between nodes', + DenoiseMaskField: { + title: 'Denoise Mask', + description: 'Denoise Mask may be passed between nodes', color: 'purple.500', }, LatentsField: { diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 11672b2a64..0eda9030a6 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -64,7 +64,7 @@ export const zFieldType = z.enum([ 'string', 'array', 'ImageField', - 'InpaintMaskField', + 'DenoiseMaskField', 'LatentsField', 'ConditioningField', 'ControlField', @@ -121,7 +121,7 @@ export type InputFieldTemplate = | StringInputFieldTemplate | BooleanInputFieldTemplate | ImageInputFieldTemplate - | InpaintMaskInputFieldTemplate + | DenoiseMaskInputFieldTemplate | LatentsInputFieldTemplate | ConditioningInputFieldTemplate | UNetInputFieldTemplate @@ -207,11 +207,11 @@ export const zConditioningField = z.object({ }); export type ConditioningField = z.infer; -export const zInpaintMaskField = z.object({ +export const zDenoiseMaskField = z.object({ mask_name: z.string().trim().min(1), masked_latents_name: z.string().trim().min(1).optional(), }); -export type InpaintMaskFieldValue = z.infer; +export type DenoiseMaskFieldValue = z.infer; export const zIntegerInputFieldValue = zInputFieldValueBase.extend({ type: z.literal('integer'), @@ -249,12 +249,12 @@ export const zLatentsInputFieldValue = zInputFieldValueBase.extend({ }); export type LatentsInputFieldValue = z.infer; -export const zInpaintMaskInputFieldValue = zInputFieldValueBase.extend({ - type: z.literal('InpaintMaskField'), - value: zInpaintMaskField.optional(), +export const zDenoiseMaskInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('DenoiseMaskField'), + value: zDenoiseMaskField.optional(), }); -export type InpaintMaskInputFieldValue = z.infer< - typeof zInpaintMaskInputFieldValue +export type DenoiseMaskInputFieldValue = z.infer< + typeof zDenoiseMaskInputFieldValue >; export const zConditioningInputFieldValue = zInputFieldValueBase.extend({ @@ -475,7 +475,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [ zBooleanInputFieldValue, zImageInputFieldValue, zLatentsInputFieldValue, - zInpaintMaskInputFieldValue, + zDenoiseMaskInputFieldValue, zConditioningInputFieldValue, zUNetInputFieldValue, zClipInputFieldValue, @@ -549,9 +549,9 @@ export type ImageCollectionInputFieldTemplate = InputFieldTemplateBase & { type: 'ImageCollection'; }; -export type InpaintMaskInputFieldTemplate = InputFieldTemplateBase & { +export type DenoiseMaskInputFieldTemplate = 
InputFieldTemplateBase & { default: undefined; - type: 'InpaintMaskField'; + type: 'DenoiseMaskField'; }; export type LatentsInputFieldTemplate = InputFieldTemplateBase & { diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index b36629b8cb..7fdc73407e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -8,12 +8,12 @@ import { ConditioningInputFieldTemplate, ControlInputFieldTemplate, ControlNetModelInputFieldTemplate, + DenoiseMaskInputFieldTemplate, EnumInputFieldTemplate, FieldType, FloatInputFieldTemplate, ImageCollectionInputFieldTemplate, ImageInputFieldTemplate, - InpaintMaskInputFieldTemplate, InputFieldTemplateBase, IntegerInputFieldTemplate, InvocationFieldSchema, @@ -264,13 +264,13 @@ const buildImageCollectionInputFieldTemplate = ({ return template; }; -const buildInpaintMaskInputFieldTemplate = ({ +const buildDenoiseMaskInputFieldTemplate = ({ schemaObject, baseField, -}: BuildInputFieldArg): InpaintMaskInputFieldTemplate => { - const template: InpaintMaskInputFieldTemplate = { +}: BuildInputFieldArg): DenoiseMaskInputFieldTemplate => { + const template: DenoiseMaskInputFieldTemplate = { ...baseField, - type: 'InpaintMaskField', + type: 'DenoiseMaskField', default: schemaObject.default ?? undefined, }; @@ -512,8 +512,8 @@ export const buildInputFieldTemplate = ( baseField, }); } - if (fieldType === 'InpaintMaskField') { - return buildInpaintMaskInputFieldTemplate({ + if (fieldType === 'DenoiseMaskField') { + return buildDenoiseMaskInputFieldTemplate({ schemaObject: fieldSchema, baseField, }); diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts index e4b72c90cd..1d06d644d1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts @@ -49,7 +49,7 @@ export const buildInputFieldValue = ( fieldValue.value = []; } - if (template.type === 'InpaintMaskField') { + if (template.type === 'DenoiseMaskField') { fieldValue.value = undefined; } diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 231e1b7a13..b4e974dadd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { - CreateInpaintMaskInvocation, + CreateDenoiseMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -130,7 +130,7 @@ export const buildCanvasInpaintGraph = ( fp32: vaePrecision === 'fp32' ? true : false, }, [INPAINT_CREATE_MASK]: { - type: 'create_inpaint_mask', + type: 'create_denoise_mask', id: INPAINT_CREATE_MASK, is_intermediate: true, fp32: vaePrecision === 'fp32' ? 
true : false, @@ -298,11 +298,11 @@ export const buildCanvasInpaintGraph = ( { source: { node_id: INPAINT_CREATE_MASK, - field: 'inpaint_mask', + field: 'denoise_mask', }, destination: { node_id: DENOISE_LATENTS, - field: 'mask', + field: 'denoise_mask', }, }, // Iterate @@ -546,7 +546,7 @@ export const buildCanvasInpaintGraph = ( image: canvasMaskImage, }; graph.nodes[INPAINT_CREATE_MASK] = { - ...(graph.nodes[INPAINT_CREATE_MASK] as CreateInpaintMaskInvocation), + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation), image: canvasInitImage, }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index f123f88103..8916dd0652 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -155,7 +155,7 @@ export const buildCanvasOutpaintGraph = ( is_intermediate: true, }, [INPAINT_CREATE_MASK]: { - type: 'create_inpaint_mask', + type: 'create_denoise_mask', id: INPAINT_CREATE_MASK, is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, @@ -338,11 +338,11 @@ export const buildCanvasOutpaintGraph = ( { source: { node_id: INPAINT_CREATE_MASK, - field: 'inpaint_mask', + field: 'denoise_mask', }, destination: { node_id: DENOISE_LATENTS, - field: 'mask', + field: 'denoise_mask', }, }, // Iterate diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index b768f90628..f51c2444d4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { - CreateInpaintMaskInvocation, + CreateDenoiseMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -139,7 +139,7 @@ export const buildCanvasSDXLInpaintGraph = ( is_intermediate: true, }, [INPAINT_CREATE_MASK]: { - type: 'create_inpaint_mask', + type: 'create_denoise_mask', id: INPAINT_CREATE_MASK, is_intermediate: true, fp32: vaePrecision === 'fp32' ? 
true : false, @@ -312,11 +312,11 @@ export const buildCanvasSDXLInpaintGraph = ( { source: { node_id: INPAINT_CREATE_MASK, - field: 'inpaint_mask', + field: 'denoise_mask', }, destination: { node_id: SDXL_DENOISE_LATENTS, - field: 'mask', + field: 'denoise_mask', }, }, // Iterate @@ -560,7 +560,7 @@ export const buildCanvasSDXLInpaintGraph = ( image: canvasMaskImage, }; graph.nodes[INPAINT_CREATE_MASK] = { - ...(graph.nodes[INPAINT_CREATE_MASK] as CreateInpaintMaskInvocation), + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation), image: canvasInitImage, }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 7911e827a7..05432f0274 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -158,7 +158,7 @@ export const buildCanvasSDXLOutpaintGraph = ( is_intermediate: true, }, [INPAINT_CREATE_MASK]: { - type: 'create_inpaint_mask', + type: 'create_denoise_mask', id: INPAINT_CREATE_MASK, is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, @@ -352,11 +352,11 @@ export const buildCanvasSDXLOutpaintGraph = ( { source: { node_id: INPAINT_CREATE_MASK, - field: 'inpaint_mask', + field: 'denoise_mask', }, destination: { node_id: SDXL_DENOISE_LATENTS, - field: 'mask', + field: 'denoise_mask', }, }, // Iterate diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 71a14c9bd3..aec77f4ed9 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1585,10 +1585,10 @@ export type components = { refiner_start?: number; }; /** - * Create Inpaint Mask - * @description Creates mask for inpaint model run. + * Create Denoise Mask + * @description Creates mask for denoising model run. */ - CreateInpaintMaskInvocation: { + CreateDenoiseMaskInvocation: { /** * Id * @description The id of this node. Must be unique among all nodes. 
@@ -1602,13 +1602,13 @@ export type components = { is_intermediate?: boolean; /** * Type - * @default create_inpaint_mask + * @default create_denoise_mask * @enum {string} */ - type: "create_inpaint_mask"; + type: "create_denoise_mask"; /** * Image - * @description Image which will be inpainted + * @description Image which will be masked */ image?: components["schemas"]["ImageField"]; /** @@ -1759,10 +1759,10 @@ export type components = { */ latents?: components["schemas"]["LatentsField"]; /** - * Mask + * Denoise Mask * @description The mask to use for the operation */ - mask?: components["schemas"]["InpaintMaskField"]; + denoise_mask?: components["schemas"]["DenoiseMaskField"]; /** * Positive Conditioning * @description Positive conditioning tensor @@ -1779,6 +1779,39 @@ export type components = { */ unet?: components["schemas"]["UNetField"]; }; + /** + * DenoiseMaskField + * @description An inpaint mask field + */ + DenoiseMaskField: { + /** + * Mask Name + * @description The name of the mask image + */ + mask_name: string; + /** + * Masked Latents Name + * @description The name of the masked image latents + */ + masked_latents_name?: string; + }; + /** + * DenoiseMaskOutput + * @description Base class for nodes that output a single image + */ + DenoiseMaskOutput: { + /** + * Type + * @default denoise_mask_output + * @enum {string} + */ + type: "denoise_mask_output"; + /** + * Denoise Mask + * @description Mask for denoise model run + */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + }; /** * Divide Integers * @description Divides two numbers @@ -2059,7 +2092,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | 
components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; /** * Edges @@ -2102,7 +2135,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["InpaintMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]; + [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | 
components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]; }; /** * Errors @@ -3257,39 +3290,6 @@ export type components = { */ seed?: number; }; - /** - * InpaintMaskField - * @description An inpaint mask field - */ - InpaintMaskField: { - /** - * Mask Name - * @description The name of the mask image - */ - mask_name: string; - /** - * Masked Latents Name - * @description The name of the masked image latents - */ - masked_latents_name?: string; - }; - /** - * InpaintMaskOutput - * @description Base class for nodes that output a single image - */ - InpaintMaskOutput: { - /** - * Type - * @default inpaint_mask_output - * @enum {string} - */ - type: "inpaint_mask_output"; - /** - * Inpaint Mask - * @description Mask for inpaint model run - */ - inpaint_mask: components["schemas"]["InpaintMaskField"]; - }; /** * Integer Primitive Collection * @description A collection of integer primitive values @@ -6408,17 +6408,11 @@ export type components = { */ StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** - * StableDiffusion2ModelFormat + * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion1ModelFormat * @description An enumeration. @@ -6426,11 +6420,17 @@ export type components = { */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** - * StableDiffusionXLModelFormat + * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + ControlNetModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -6547,7 +6547,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | 
components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -6588,7 +6588,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | 
components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateInpaintMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | 
components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | 
components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 031130ed65..6b064edab3 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -111,7 +111,7 @@ export type ImageBlurInvocation = s['ImageBlurInvocation']; export type ImageScaleInvocation = s['ImageScaleInvocation']; export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation']; export type InfillTileInvocation = s['InfillTileInvocation']; -export type CreateInpaintMaskInvocation = s['CreateInpaintMaskInvocation']; +export type CreateDenoiseMaskInvocation = s['CreateDenoiseMaskInvocation']; export type RandomIntInvocation = s['RandomIntInvocation']; export type CompelInvocation = s['CompelInvocation']; export type 
DynamicPromptInvocation = s['DynamicPromptInvocation']; From 521da555d6e87a6943b288712215a6a3bbdebbc4 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 06:09:02 +1200 Subject: [PATCH 12/20] feat: Update color of Denoise Mask socket --- invokeai/frontend/web/src/features/nodes/types/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 520a65524f..87598de530 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -62,7 +62,7 @@ export const FIELDS: Record = { DenoiseMaskField: { title: 'Denoise Mask', description: 'Denoise Mask may be passed between nodes', - color: 'purple.500', + color: 'red.300', }, LatentsField: { title: 'Latents', From 249048aae7758c665413427c99c5ab888a822681 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 06:14:35 +1200 Subject: [PATCH 13/20] fix: Reorder DenoiseMask socket fields --- invokeai/app/invocations/latent.py | 47 +++++++++++++----------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 14aa6a56ba..387fcb27d7 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -8,27 +8,21 @@ import numpy as np import torch import torchvision.transforms as T from diffusers.image_processor import VaeImageProcessor -from diffusers.models.attention_processor import ( - AttnProcessor2_0, - LoRAAttnProcessor2_0, - LoRAXFormersAttnProcessor, - XFormersAttnProcessor, -) +from diffusers.models.attention_processor import (AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor) from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler from pydantic import validator from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.metadata import CoreMetadata -from invokeai.app.invocations.primitives import ( - DenoiseMaskField, - DenoiseMaskOutput, - ImageField, - ImageOutput, - LatentsField, - LatentsOutput, - build_latents_output, -) +from invokeai.app.invocations.primitives import (DenoiseMaskField, + DenoiseMaskOutput, ImageField, + ImageOutput, LatentsField, + LatentsOutput, + build_latents_output) from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.model_management.models import ModelType, SilenceWarnings @@ -37,16 +31,16 @@ from ...backend.model_management.lora import ModelPatcher from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( - ConditioningData, - ControlNetData, - StableDiffusionGeneratorPipeline, - image_resized_to_grid_as_tensor, -) -from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings + ConditioningData, ControlNetData, StableDiffusionGeneratorPipeline, + image_resized_to_grid_as_tensor) +from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \ + PostprocessingSettings from ...backend.stable_diffusion.schedulers import 
SCHEDULER_MAP from ...backend.util.devices import choose_precision, choose_torch_device from ..models.image import ImageCategory, ResourceOrigin -from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title +from .baseinvocation import (BaseInvocation, FieldDescriptions, Input, + InputField, InvocationContext, UIType, tags, + title) from .compel import ConditioningField from .controlnet_image_processors import ControlField from .model import ModelInfo, UNetField, VaeField @@ -66,14 +60,15 @@ class CreateDenoiseMaskInvocation(BaseInvocation): type: Literal["create_denoise_mask"] = "create_denoise_mask" # Inputs - image: Optional[ImageField] = InputField(default=None, description="Image which will be masked") - mask: ImageField = InputField(description="The mask to use when pasting") vae: VaeField = InputField( description=FieldDescriptions.vae, input=Input.Connection, + ui_order=0 ) - tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) - fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) + image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) + mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) + tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) + fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4) def prep_mask_tensor(self, mask_image): if mask_image.mode != "L": From b18695df6fac936d0502039168a1cd8c09d08b4b Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 06:16:13 +1200 Subject: [PATCH 14/20] fix: Update color of denoise mask socket The previous red looked too much like the error color.
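For reference, after this tweak the denoise-mask entry in the frontend's FIELDS record (invokeai/frontend/web/src/features/nodes/types/constants.ts) should look roughly like the sketch below. This is only an illustrative excerpt assembled from the diffs in this series; the Record typing on FIELDS and the other field entries are omitted.

export const FIELDS = {
  DenoiseMaskField: {
    title: 'Denoise Mask',
    description: 'Denoise Mask may be passed between nodes',
    // socket color after this patch; earlier commits in the series used purple.500 and then red.300
    color: 'red.700',
  },
  // ...other field types (LatentsField, ImageField, etc.) are unchanged by this patch
};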
--- invokeai/frontend/web/src/features/nodes/types/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 87598de530..6809f3f6bb 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -62,7 +62,7 @@ export const FIELDS: Record = { DenoiseMaskField: { title: 'Denoise Mask', description: 'Denoise Mask may be passed between nodes', - color: 'red.300', + color: 'red.700', }, LatentsField: { title: 'Latents', From 3f8d17d6b75a9c96704e7960cadd9fd6ebe8363e Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 06:17:08 +1200 Subject: [PATCH 15/20] chore: Black linting --- invokeai/app/invocations/latent.py | 44 ++++++++++++++++-------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 387fcb27d7..90255bd55c 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -8,21 +8,27 @@ import numpy as np import torch import torchvision.transforms as T from diffusers.image_processor import VaeImageProcessor -from diffusers.models.attention_processor import (AttnProcessor2_0, - LoRAAttnProcessor2_0, - LoRAXFormersAttnProcessor, - XFormersAttnProcessor) +from diffusers.models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler from pydantic import validator from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.metadata import CoreMetadata -from invokeai.app.invocations.primitives import (DenoiseMaskField, - DenoiseMaskOutput, ImageField, - ImageOutput, LatentsField, - LatentsOutput, - build_latents_output) +from invokeai.app.invocations.primitives import ( + DenoiseMaskField, + DenoiseMaskOutput, + ImageField, + ImageOutput, + LatentsField, + LatentsOutput, + build_latents_output, +) from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.model_management.models import ModelType, SilenceWarnings @@ -31,16 +37,16 @@ from ...backend.model_management.lora import ModelPatcher from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( - ConditioningData, ControlNetData, StableDiffusionGeneratorPipeline, - image_resized_to_grid_as_tensor) -from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \ - PostprocessingSettings + ConditioningData, + ControlNetData, + StableDiffusionGeneratorPipeline, + image_resized_to_grid_as_tensor, +) +from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP from ...backend.util.devices import choose_precision, choose_torch_device from ..models.image import ImageCategory, ResourceOrigin -from .baseinvocation import (BaseInvocation, FieldDescriptions, Input, - InputField, InvocationContext, UIType, tags, - title) +from .baseinvocation import BaseInvocation, 
FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title from .compel import ConditioningField from .controlnet_image_processors import ControlField from .model import ModelInfo, UNetField, VaeField @@ -60,11 +66,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation): type: Literal["create_denoise_mask"] = "create_denoise_mask" # Inputs - vae: VaeField = InputField( - description=FieldDescriptions.vae, - input=Input.Connection, - ui_order=0 - ) + vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0) image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) From 71c3955530381debfab53af8084b34a252c97e5c Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 27 Aug 2023 08:26:23 +1200 Subject: [PATCH 16/20] feat: Add Scale Before Processing To Canvas Txt2Img / Img2Img (w/ SDXL) --- invokeai/app/invocations/image.py | 5 + .../nodes/util/graphBuilders/addVAEToGraph.ts | 7 +- .../buildCanvasImageToImageGraph.ts | 160 +++++++++-------- .../buildCanvasSDXLImageToImageGraph.ts | 165 +++++++++--------- .../buildCanvasSDXLTextToImageGraph.ts | 87 +++++++-- .../buildCanvasTextToImageGraph.ts | 87 +++++++-- .../nodes/util/graphBuilders/constants.ts | 1 + .../frontend/web/src/services/api/schema.d.ts | 39 +++-- 8 files changed, 350 insertions(+), 201 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 36157e195a..68f71e6d99 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -375,6 +375,11 @@ class ImageResizeInvocation(BaseInvocation): width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)") height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)") resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") + metadata: CoreMetadata = InputField( + default=None, + description=FieldDescriptions.core_metadata, + ui_hidden=True, + ) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index a0b6bc9803..8fbd94e491 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -31,6 +31,11 @@ export const addVAEToGraph = ( modelLoaderNodeId: string = MAIN_MODEL_LOADER ): void => { const { vae } = state.generation; + const { boundingBoxScaleMethod } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); const isAutoVae = !vae; const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as @@ -77,7 +82,7 @@ export const addVAEToGraph = ( field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: isUsingScaledDimensions ? 
LATENTS_TO_IMAGE : CANVAS_OUTPUT, field: 'vae', }, }); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index a68aeef392..d3334d31c3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; -import { - ImageDTO, - ImageResizeInvocation, - ImageToLatentsInvocation, -} from 'services/api/types'; +import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; @@ -19,12 +15,13 @@ import { CLIP_SKIP, DENOISE_LATENTS, IMAGE_TO_LATENTS, + IMG2IMG_RESIZE, + LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, - RESIZE, } from './constants'; /** @@ -43,6 +40,7 @@ export const buildCanvasImageToImageGraph = ( scheduler, steps, img2imgStrength: strength, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, @@ -51,7 +49,15 @@ export const buildCanvasImageToImageGraph = ( // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); @@ -104,15 +110,17 @@ export const buildCanvasImageToImageGraph = ( id: NOISE, is_intermediate: true, use_cpu, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, }, [IMAGE_TO_LATENTS]: { type: 'i2l', id: IMAGE_TO_LATENTS, is_intermediate: true, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, }, [DENOISE_LATENTS]: { type: 'denoise_latents', @@ -214,82 +222,84 @@ export const buildCanvasImageToImageGraph = ( field: 'latents', }, }, - // Decode the denoised latents to an image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[IMG2IMG_RESIZE] = { + id: IMG2IMG_RESIZE, + type: 'img_resize', + is_intermediate: true, + image: initialImage, + width: scaledBoundingBoxDimensions.width, + height: scaledBoundingBoxDimensions.height, + }; + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }; + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( + { + source: { + node_id: IMG2IMG_RESIZE, + field: 'image', + }, + destination: { + node_id: IMAGE_TO_LATENTS, + field: 'image', + }, + }, { source: { node_id: DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; - - // handle `fit` - if (initialImage.width !== width || initialImage.height !== height) { - // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS` - - // Create a resize node, explicitly setting its image - const resizeNode: ImageResizeInvocation = { - id: RESIZE, - type: 'img_resize', - image: { - image_name: initialImage.image_name, - }, - is_intermediate: true, - width, - height, - }; - - graph.nodes[RESIZE] = resizeNode; - - // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS` - graph.edges.push({ - source: { node_id: RESIZE, field: 'image' }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'image', - }, - }); - - // The `RESIZE` node also passes its width and height to `NOISE` - graph.edges.push({ - source: { node_id: RESIZE, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', - }, - }); - - graph.edges.push({ - source: { node_id: RESIZE, field: 'height' }, - destination: { - node_id: NOISE, - field: 'height', - }, - }); + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); } else { - // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly - (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = { - image_name: initialImage.image_name, + graph.nodes[CANVAS_OUTPUT] = { + type: 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, }; - // Pass the image's dimensions to the `NOISE` node + (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = + initialImage; + graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', + source: { + node_id: DENOISE_LATENTS, + field: 'latents', }, - }); - graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'height' }, destination: { - node_id: NOISE, - field: 'height', + node_id: CANVAS_OUTPUT, + field: 'latents', }, }); } @@ -300,8 +310,10 @@ export const buildCanvasImageToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'img2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index ef32943bc8..ea46e8a956 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; -import { - ImageDTO, - ImageResizeInvocation, - ImageToLatentsInvocation, -} from 'services/api/types'; +import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; @@ -17,11 +13,12 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, IMAGE_TO_LATENTS, + IMG2IMG_RESIZE, + LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, - RESIZE, SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, @@ -59,7 +56,15 @@ export const buildCanvasSDXLImageToImageGraph = ( // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); @@ -109,16 +114,18 @@ export const buildCanvasSDXLImageToImageGraph = ( id: NOISE, is_intermediate: true, use_cpu, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, }, [IMAGE_TO_LATENTS]: { type: 'i2l', id: IMAGE_TO_LATENTS, is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', @@ -132,12 +139,6 @@ export const buildCanvasSDXLImageToImageGraph = ( : 1 - strength, denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, }, - [CANVAS_OUTPUT]: { - type: 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - fp32: vaePrecision === 'fp32' ? 
true : false, - }, }, edges: [ // Connect Model Loader To UNet & CLIP @@ -232,82 +233,84 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, }, - // Decode denoised latents to an image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[IMG2IMG_RESIZE] = { + id: IMG2IMG_RESIZE, + type: 'img_resize', + is_intermediate: true, + image: initialImage, + width: scaledBoundingBoxDimensions.width, + height: scaledBoundingBoxDimensions.height, + }; + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( + { + source: { + node_id: IMG2IMG_RESIZE, + field: 'image', + }, + destination: { + node_id: IMAGE_TO_LATENTS, + field: 'image', + }, + }, { source: { node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; - - // handle `fit` - if (initialImage.width !== width || initialImage.height !== height) { - // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS` - - // Create a resize node, explicitly setting its image - const resizeNode: ImageResizeInvocation = { - id: RESIZE, - type: 'img_resize', - image: { - image_name: initialImage.image_name, - }, - is_intermediate: true, - width, - height, - }; - - graph.nodes[RESIZE] = resizeNode; - - // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS` - graph.edges.push({ - source: { node_id: RESIZE, field: 'image' }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'image', - }, - }); - - // The `RESIZE` node also passes its width and height to `NOISE` - graph.edges.push({ - source: { node_id: RESIZE, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', - }, - }); - - graph.edges.push({ - source: { node_id: RESIZE, field: 'height' }, - destination: { - node_id: NOISE, - field: 'height', - }, - }); + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); } else { - // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly - (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = { - image_name: initialImage.image_name, + graph.nodes[CANVAS_OUTPUT] = { + type: 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, }; - // Pass the image's dimensions to the `NOISE` node + (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = + initialImage; + graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', + source: { + node_id: SDXL_DENOISE_LATENTS, + field: 'latents', }, - }); - graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'height' }, destination: { - node_id: NOISE, - field: 'height', + node_id: CANVAS_OUTPUT, + field: 'latents', }, }); } @@ -318,8 +321,10 @@ export const buildCanvasSDXLImageToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'img2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index e79e08ba41..bdfc60fe00 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -15,6 +15,7 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, + LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, @@ -49,7 +50,15 @@ export const buildCanvasSDXLTextToImageGraph = ( // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = state.sdxl; @@ -136,17 +145,15 @@ export const buildCanvasSDXLTextToImageGraph = ( type: 'noise', id: NOISE, is_intermediate: true, - width, - height, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, use_cpu, }, [t2lNode.id]: t2lNode, - [CANVAS_OUTPUT]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - fp32: vaePrecision === 'fp32' ? true : false, - }, }, edges: [ // Connect Model Loader to UNet and CLIP @@ -231,19 +238,67 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'noise', }, }, - // Decode Denoised Latents To Image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( { source: { node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); + } else { + graph.nodes[CANVAS_OUTPUT] = { + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.edges.push({ + source: { + node_id: SDXL_DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'latents', + }, + }); + } // add metadata accumulator, which is only mostly populated - some fields are added later graph.nodes[METADATA_ACCUMULATOR] = { @@ -251,8 +306,10 @@ export const buildCanvasSDXLTextToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'txt2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? 
width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 4548a7e099..1ceb23b71e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -17,6 +17,7 @@ import { CANVAS_TEXT_TO_IMAGE_GRAPH, CLIP_SKIP, DENOISE_LATENTS, + LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -39,6 +40,7 @@ export const buildCanvasTextToImageGraph = ( cfgScale: cfg_scale, scheduler, steps, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, @@ -47,7 +49,15 @@ export const buildCanvasTextToImageGraph = ( // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); @@ -131,16 +141,15 @@ export const buildCanvasTextToImageGraph = ( type: 'noise', id: NOISE, is_intermediate: true, - width, - height, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, use_cpu, }, [t2lNode.id]: t2lNode, - [CANVAS_OUTPUT]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - }, }, edges: [ // Connect Model Loader to UNet & CLIP Skip @@ -216,19 +225,67 @@ export const buildCanvasTextToImageGraph = ( field: 'noise', }, }, - // Decode denoised latents to image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( { source: { node_id: DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); + } else { + graph.nodes[CANVAS_OUTPUT] = { + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? 
true : false, + }; + + graph.edges.push({ + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'latents', + }, + }); + } // add metadata accumulator, which is only mostly populated - some fields are added later graph.nodes[METADATA_ACCUMULATOR] = { @@ -236,8 +293,10 @@ export const buildCanvasTextToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'txt2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 09f6a1c2d1..c701386898 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip'; export const IMAGE_TO_LATENTS = 'image_to_latents'; export const LATENTS_TO_LATENTS = 'latents_to_latents'; export const RESIZE = 'resize_image'; +export const IMG2IMG_RESIZE = 'img2img_resize'; export const CANVAS_OUTPUT = 'canvas_output'; export const INPAINT_IMAGE = 'inpaint_image'; export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image'; diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index aec77f4ed9..c089798721 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1606,6 +1606,11 @@ export type components = { * @enum {string} */ type: "create_denoise_mask"; + /** + * Vae + * @description VAE + */ + vae?: components["schemas"]["VaeField"]; /** * Image * @description Image which will be masked @@ -1616,11 +1621,6 @@ export type components = { * @description The mask to use when pasting */ mask?: components["schemas"]["ImageField"]; - /** - * Vae - * @description VAE - */ - vae?: components["schemas"]["VaeField"]; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -2995,6 +2995,11 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + /** + * Metadata + * @description Optional core metadata to be written to image + */ + metadata?: components["schemas"]["CoreMetadata"]; }; /** * Image Saturation Adjustment @@ -6407,18 +6412,6 @@ export type components = { * @enum {string} */ StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * ControlNetModelFormat * @description An enumeration. @@ -6431,6 +6424,18 @@ export type components = { * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionXLModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; From 1811b54727dc7d36584bf7b465fab8abc7a665db Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 27 Aug 2023 20:03:53 +0300 Subject: [PATCH 17/20] Provide metadata to image creation call --- invokeai/app/invocations/image.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 68f71e6d99..cc1efa3a68 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -375,10 +375,8 @@ class ImageResizeInvocation(BaseInvocation): width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)") height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)") resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") - metadata: CoreMetadata = InputField( - default=None, - description=FieldDescriptions.core_metadata, - ui_hidden=True, + metadata: Optional[CoreMetadata] = InputField( + default=None, description=FieldDescriptions.core_metadata, ui_hidden=True ) def invoke(self, context: InvocationContext) -> ImageOutput: @@ -398,6 +396,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata.dict() if self.metadata else None, ) return ImageOutput( From 526c7e77378e7d246148409a56d2d319477c8bba Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 27 Aug 2023 20:04:55 +0300 Subject: [PATCH 18/20] Provide antialias argument as behaviour will be changed in future(deprecation warning) --- invokeai/app/invocations/latent.py | 4 ++-- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 90255bd55c..a94b209ce4 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -102,7 +102,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation): context=context, ) - img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR) + img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0) # TODO: masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone()) @@ -404,7 +404,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return None, None mask = context.services.latents.get(self.denoise_mask.mask_name) - mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR) + mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) if self.denoise_mask.masked_latents_name is not None: masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name) else: diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index e2b18b8c81..b0800c42e1 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -144,7 +144,7 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool = Tr w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of) transformation = T.Compose( [ - T.Resize((h, w), 
T.InterpolationMode.LANCZOS), + T.Resize((h, w), T.InterpolationMode.LANCZOS, antialias=False), T.ToTensor(), ] ) From 3e6c49001cf94a2fec9eb3ecb28d157883fdea61 Mon Sep 17 00:00:00 2001 From: StAlKeR7779 Date: Mon, 28 Aug 2023 02:54:39 +0300 Subject: [PATCH 19/20] Change antialias to True as input - image Co-authored-by: Lincoln Stein --- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index b0800c42e1..2d1894c896 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -144,7 +144,7 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool = Tr w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of) transformation = T.Compose( [ - T.Resize((h, w), T.InterpolationMode.LANCZOS, antialias=False), + T.Resize((h, w), T.InterpolationMode.LANCZOS, antialias=True), T.ToTensor(), ] ) From e60af40c8d2af363ffdfbb0c62ad33bc47c266e7 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Tue, 29 Aug 2023 01:11:55 +1200 Subject: [PATCH 20/20] chore: lint fixes --- invokeai/frontend/web/src/app/components/App.tsx | 4 ++-- .../parameters/hooks/usePreselectedImage.ts | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index a4a0997443..a70ed03fda 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -12,12 +12,12 @@ import { languageSelector } from 'features/system/store/systemSelectors'; import InvokeTabs from 'features/ui/components/InvokeTabs'; import i18n from 'i18n'; import { size } from 'lodash-es'; -import { ReactNode, memo, useCallback, useEffect, useMemo } from 'react'; +import { ReactNode, memo, useCallback, useEffect } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; +import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import GlobalHotkeys from './GlobalHotkeys'; import Toaster from './Toaster'; -import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage'; const DEFAULT_CONFIG = {}; diff --git a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts index fa310a66ad..6f7ac46f25 100644 --- a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts +++ b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts @@ -1,16 +1,16 @@ import { skipToken } from '@reduxjs/toolkit/dist/query'; -import { useCallback, useMemo, useState } from 'react'; +import { t } from 'i18next'; +import { useCallback, useState } from 'react'; +import { useAppToaster } from '../../../app/components/Toaster'; +import { useAppDispatch } from '../../../app/store/storeHooks'; import { useGetImageDTOQuery, useGetImageMetadataQuery, } from '../../../services/api/endpoints/images'; -import { useAppDispatch } from '../../../app/store/storeHooks'; import { setInitialCanvasImage } from '../../canvas/store/canvasSlice'; import { setActiveTab } from '../../ui/store/uiSlice'; -import { useRecallParameters } from 
'./useRecallParameters'; import { initialImageSelected } from '../store/actions'; -import { useAppToaster } from '../../../app/components/Toaster'; -import { t } from 'i18next'; +import { useRecallParameters } from './useRecallParameters'; type SelectedImage = { imageName: string; @@ -26,7 +26,7 @@ export const usePreselectedImage = () => { const { recallAllParameters } = useRecallParameters(); const toaster = useAppToaster(); - const { currentData: selectedImageDto, isError } = useGetImageDTOQuery( + const { currentData: selectedImageDto } = useGetImageDTOQuery( imageNameForDto ?? skipToken ); @@ -37,8 +37,8 @@ export const usePreselectedImage = () => { const handlePreselectedImage = useCallback( (selectedImage?: SelectedImage) => { if (!selectedImage) { -return; -} + return; + } if (selectedImage.action === 'sendToCanvas') { setImageNameForDto(selectedImage?.imageName);