diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py
index 3396778f5f..d57de57a37 100644
--- a/invokeai/app/invocations/controlnet_image_processors.py
+++ b/invokeai/app/invocations/controlnet_image_processors.py
@@ -96,7 +96,7 @@ class ControlOutput(BaseInvocationOutput):
control: ControlField = OutputField(description=FieldDescriptions.control)
-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.0.0")
+@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.0")
class ControlNetInvocation(BaseInvocation):
"""Collects ControlNet info to pass to other nodes"""
@@ -173,7 +173,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
title="Canny Processor",
tags=["controlnet", "canny"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class CannyImageProcessorInvocation(ImageProcessorInvocation):
"""Canny edge detection for ControlNet"""
@@ -196,7 +196,7 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
title="HED (softedge) Processor",
tags=["controlnet", "hed", "softedge"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class HedImageProcessorInvocation(ImageProcessorInvocation):
"""Applies HED edge detection to image"""
@@ -225,7 +225,7 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
title="Lineart Processor",
tags=["controlnet", "lineart"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class LineartImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art processing to image"""
@@ -247,7 +247,7 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
title="Lineart Anime Processor",
tags=["controlnet", "lineart", "anime"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art anime processing to image"""
@@ -270,7 +270,7 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
title="Openpose Processor",
tags=["controlnet", "openpose", "pose"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Openpose processing to image"""
@@ -295,7 +295,7 @@ class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
title="Midas Depth Processor",
tags=["controlnet", "midas"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Midas depth processing to image"""
@@ -322,7 +322,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
title="Normal BAE Processor",
tags=["controlnet"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies NormalBae processing to image"""
@@ -339,7 +339,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
@invocation(
- "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.0.0"
+ "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.1.0"
)
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
"""Applies MLSD processing to image"""
@@ -362,7 +362,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
@invocation(
- "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.0.0"
+ "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.1.0"
)
class PidiImageProcessorInvocation(ImageProcessorInvocation):
"""Applies PIDI processing to image"""
@@ -389,7 +389,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
title="Content Shuffle Processor",
tags=["controlnet", "contentshuffle"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
"""Applies content shuffle processing to image"""
@@ -419,7 +419,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
title="Zoe (Depth) Processor",
tags=["controlnet", "zoe", "depth"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Zoe depth processing to image"""
@@ -435,7 +435,7 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
title="Mediapipe Face Processor",
tags=["controlnet", "mediapipe", "face"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
"""Applies mediapipe face processing to image"""
@@ -458,7 +458,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
title="Leres (Depth) Processor",
tags=["controlnet", "leres", "depth"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class LeresImageProcessorInvocation(ImageProcessorInvocation):
"""Applies leres processing to image"""
@@ -487,7 +487,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
title="Tile Resample Processor",
tags=["controlnet", "tile"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
"""Tile resampler processor"""
@@ -527,7 +527,7 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
title="Segment Anything Processor",
tags=["controlnet", "segmentanything"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
"""Applies segment anything processing to image"""
@@ -569,7 +569,7 @@ class SamDetectorReproducibleColors(SamDetector):
title="Color Map Processor",
tags=["controlnet"],
category="controlnet",
- version="1.0.0",
+ version="1.1.0",
)
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
"""Generates a color map from the provided image"""
diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py
index e5cfd327c1..b9764285f5 100644
--- a/invokeai/app/invocations/cv.py
+++ b/invokeai/app/invocations/cv.py
@@ -11,7 +11,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
-@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.0.0")
+@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.1.0")
class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Simple inpaint using opencv."""
diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py
index 41d1ef1e4b..ef3d0aa9a1 100644
--- a/invokeai/app/invocations/facetools.py
+++ b/invokeai/app/invocations/facetools.py
@@ -438,7 +438,7 @@ def get_faces_list(
return all_faces
-@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.0.2")
+@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.1.0")
class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Bound, extract, and mask a face from an image using MediaPipe detection"""
@@ -532,7 +532,7 @@ class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
return output
-@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.0.2")
+@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.1.0")
class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Face mask creation using mediapipe face detection"""
@@ -650,7 +650,7 @@ class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
@invocation(
- "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.0.2"
+ "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.1.0"
)
class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Outputs an image with detected face IDs printed on each face. For use with other FaceTools."""
diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index f4d32234b9..1e0adbd37a 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -36,7 +36,7 @@ class ShowImageInvocation(BaseInvocation):
)
-@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0")
+@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.1.0")
class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Creates a blank image and forwards it to the pipeline"""
@@ -66,7 +66,7 @@ class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
)
-@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0")
+@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.1.0")
class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Crops an image to a specified box. The box can be outside of the image."""
@@ -100,7 +100,7 @@ class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1")
+@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.1.0")
class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Pastes an image into another image."""
@@ -154,7 +154,7 @@ class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0")
+@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.1.0")
class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Extracts the alpha channel of an image as a mask."""
@@ -186,7 +186,7 @@ class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0")
+@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.1.0")
class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Multiplies two images together using `PIL.ImageChops.multiply()`."""
@@ -220,7 +220,7 @@ class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
IMAGE_CHANNELS = Literal["A", "R", "G", "B"]
-@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0")
+@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.1.0")
class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Gets a channel from an image."""
@@ -253,7 +253,7 @@ class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
-@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0")
+@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.1.0")
class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Converts an image to a different mode."""
@@ -283,7 +283,7 @@ class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0")
+@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.1.0")
class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Blurs an image"""
@@ -338,7 +338,7 @@ PIL_RESAMPLING_MAP = {
}
-@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0")
+@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.1.0")
class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Resizes an image to specific dimensions"""
@@ -375,7 +375,7 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
)
-@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0")
+@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.1.0")
class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Scales an image by a factor"""
@@ -417,7 +417,7 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
)
-@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0")
+@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.1.0")
class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Linear interpolation of all pixels of an image"""
@@ -451,7 +451,7 @@ class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0")
+@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.1.0")
class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Inverse linear interpolation of all pixels of an image"""
@@ -485,7 +485,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0")
+@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.1.0")
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Add blur to NSFW-flagged images"""
@@ -532,7 +532,7 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
title="Add Invisible Watermark",
tags=["image", "watermark"],
category="image",
- version="1.0.0",
+ version="1.1.0",
)
class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Add an invisible watermark to an image"""
@@ -561,7 +561,7 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
)
-@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0")
+@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.1.0")
class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Applies an edge mask to an image"""
@@ -612,7 +612,7 @@ class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
title="Combine Masks",
tags=["image", "mask", "multiply"],
category="image",
- version="1.0.0",
+ version="1.1.0",
)
class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`."""
@@ -644,7 +644,7 @@ class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0")
+@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.1.0")
class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""
Shifts the colors of a target image to match the reference image, optionally
@@ -755,7 +755,7 @@ class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0")
+@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.1.0")
class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Adjusts the Hue of an image."""
@@ -858,7 +858,7 @@ CHANNEL_FORMATS = {
"value",
],
category="image",
- version="1.0.0",
+ version="1.1.0",
)
class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Add or subtract a value from a specific color channel of an image."""
@@ -929,7 +929,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"value",
],
category="image",
- version="1.0.0",
+ version="1.1.0",
)
class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Scale a specific color channel of an image."""
@@ -988,7 +988,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata)
title="Save Image",
tags=["primitives", "image"],
category="primitives",
- version="1.0.1",
+ version="1.1.0",
use_cache=False,
)
class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
diff --git a/invokeai/app/invocations/infill.py b/invokeai/app/invocations/infill.py
index b100fe7c4e..9905aa1b5e 100644
--- a/invokeai/app/invocations/infill.py
+++ b/invokeai/app/invocations/infill.py
@@ -118,7 +118,7 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int]
return si
-@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
+@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image with a solid color"""
@@ -154,7 +154,7 @@ class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
+@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image with tiles of the image"""
@@ -192,7 +192,7 @@ class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
@invocation(
- "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0"
+ "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0"
)
class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using the PatchMatch algorithm"""
@@ -245,7 +245,7 @@ class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
+@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using the LaMa model"""
@@ -274,7 +274,7 @@ class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
)
-@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint")
+@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using OpenCV Inpainting"""
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 9412aec39b..12d30301cf 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -790,7 +790,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
title="Latents to Image",
tags=["latents", "image", "vae", "l2i"],
category="latents",
- version="1.0.0",
+ version="1.1.0",
)
class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Generates an image from latents."""
diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py
index 45b5ed61b1..37b63fe692 100644
--- a/invokeai/app/invocations/onnx.py
+++ b/invokeai/app/invocations/onnx.py
@@ -326,7 +326,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
title="ONNX Latents to Image",
tags=["latents", "image", "vae", "onnx"],
category="image",
- version="1.0.0",
+ version="1.1.0",
)
class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Generates an image from latents."""
diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py
index 1167914aca..557325eaa0 100644
--- a/invokeai/app/invocations/upscale.py
+++ b/invokeai/app/invocations/upscale.py
@@ -29,7 +29,7 @@ if choose_torch_device() == torch.device("mps"):
from torch import mps
-@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.1.0")
+@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.2.0")
class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Upscales an image using RealESRGAN."""
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 94e05f791c..561b577a46 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -920,7 +920,10 @@
"unknownTemplate": "Unknown Template",
"unkownInvocation": "Unknown Invocation type",
"updateNode": "Update Node",
+ "updateAllNodes": "Update All Nodes",
"updateApp": "Update App",
+ "unableToUpdateNodes_one": "Unable to update {{count}} node",
+ "unableToUpdateNodes_other": "Unable to update {{count}} nodes",
"vaeField": "Vae",
"vaeFieldDescription": "Vae submodel.",
"vaeModelField": "VAE",
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts
index 772ea216c0..9c1727fc79 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts
@@ -72,6 +72,7 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa
import { addTabChangedListener } from './listeners/tabChanged';
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
import { addWorkflowLoadedListener } from './listeners/workflowLoaded';
+import { addUpdateAllNodesRequestedListener } from './listeners/updateAllNodesRequested';
export const listenerMiddleware = createListenerMiddleware();
@@ -178,6 +179,7 @@ addReceivedOpenAPISchemaListener();
// Workflows
addWorkflowLoadedListener();
+addUpdateAllNodesRequestedListener();
// DND
addImageDroppedListener();
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts
new file mode 100644
index 0000000000..ece6702ceb
--- /dev/null
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts
@@ -0,0 +1,52 @@
+import {
+ getNeedsUpdate,
+ updateNode,
+} from 'features/nodes/hooks/useNodeVersion';
+import { updateAllNodesRequested } from 'features/nodes/store/actions';
+import { nodeReplaced } from 'features/nodes/store/nodesSlice';
+import { startAppListening } from '..';
+import { logger } from 'app/logging/logger';
+import { addToast } from 'features/system/store/systemSlice';
+import { makeToast } from 'features/system/util/makeToast';
+import { t } from 'i18next';
+
+export const addUpdateAllNodesRequestedListener = () => {
+ startAppListening({
+ actionCreator: updateAllNodesRequested,
+ effect: (action, { dispatch, getState }) => {
+ const log = logger('nodes');
+ const nodes = getState().nodes.nodes;
+ const templates = getState().nodes.nodeTemplates;
+
+ let unableToUpdateCount = 0;
+
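+ // Try to update each node in place; nodes that need an update but cannot be auto-updated are counted and reported below.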
+ nodes.forEach((node) => {
+ const template = templates[node.data.type];
+ const needsUpdate = getNeedsUpdate(node, template);
+ const updatedNode = updateNode(node, template);
+ if (!updatedNode) {
+ if (needsUpdate) {
+ unableToUpdateCount++;
+ }
+ return;
+ }
+ dispatch(nodeReplaced({ nodeId: updatedNode.id, node: updatedNode }));
+ });
+
+ if (unableToUpdateCount) {
+ log.warn(
+ `Unable to update ${unableToUpdateCount} nodes. Please report this issue.`
+ );
+ dispatch(
+ addToast(
+ makeToast({
+ title: t('nodes.unableToUpdateNodes', {
+ count: unableToUpdateCount,
+ }),
+ })
+ )
+ );
+ }
+ },
+ });
+};
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
index cd6c5215d1..643e003f72 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx
@@ -3,7 +3,7 @@ import { memo } from 'react';
import NodeCollapseButton from '../common/NodeCollapseButton';
import NodeTitle from '../common/NodeTitle';
import InvocationNodeCollapsedHandles from './InvocationNodeCollapsedHandles';
-import InvocationNodeNotes from './InvocationNodeNotes';
+import InvocationNodeInfoIcon from './InvocationNodeInfoIcon';
import InvocationNodeStatusIndicator from './InvocationNodeStatusIndicator';
type Props = {
@@ -34,7 +34,7 @@ const InvocationNodeHeader = ({ nodeId, isOpen }: Props) => {
-        <InvocationNodeNotes nodeId={nodeId} />
+        <InvocationNodeInfoIcon nodeId={nodeId} />
      {!isOpen && <InvocationNodeCollapsedHandles nodeId={nodeId} />}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeNotes.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeInfoIcon.tsx
similarity index 58%
rename from invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeNotes.tsx
rename to invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeInfoIcon.tsx
index 8a96fb4230..83867a35cb 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeNotes.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeInfoIcon.tsx
@@ -1,85 +1,39 @@
-import {
- Flex,
- Icon,
- Modal,
- ModalBody,
- ModalCloseButton,
- ModalContent,
- ModalFooter,
- ModalHeader,
- ModalOverlay,
- Text,
- Tooltip,
- useDisclosure,
-} from '@chakra-ui/react';
+import { Flex, Icon, Text, Tooltip } from '@chakra-ui/react';
import { compare } from 'compare-versions';
import { useNodeData } from 'features/nodes/hooks/useNodeData';
-import { useNodeLabel } from 'features/nodes/hooks/useNodeLabel';
import { useNodeTemplate } from 'features/nodes/hooks/useNodeTemplate';
-import { useNodeTemplateTitle } from 'features/nodes/hooks/useNodeTemplateTitle';
+import { useNodeVersion } from 'features/nodes/hooks/useNodeVersion';
import { isInvocationNodeData } from 'features/nodes/types/types';
import { memo, useMemo } from 'react';
-import { FaInfoCircle } from 'react-icons/fa';
-import NotesTextarea from './NotesTextarea';
-import { useDoNodeVersionsMatch } from 'features/nodes/hooks/useDoNodeVersionsMatch';
import { useTranslation } from 'react-i18next';
+import { FaInfoCircle } from 'react-icons/fa';
interface Props {
nodeId: string;
}
-const InvocationNodeNotes = ({ nodeId }: Props) => {
- const { isOpen, onOpen, onClose } = useDisclosure();
- const label = useNodeLabel(nodeId);
- const title = useNodeTemplateTitle(nodeId);
- const doVersionsMatch = useDoNodeVersionsMatch(nodeId);
- const { t } = useTranslation();
+const InvocationNodeInfoIcon = ({ nodeId }: Props) => {
+ const { needsUpdate } = useNodeVersion(nodeId);
return (
-    <>
-      <Tooltip
-        label={<TooltipContent nodeId={nodeId} />}
-        placement="top"
-        shouldWrapChildren
-      >
-        <Flex onClick={onOpen} sx={{ alignItems: 'center' }}>
-          <Icon
-            as={FaInfoCircle}
-            sx={{ color: doVersionsMatch ? 'base.400' : 'error.400' }}
-          />
-        </Flex>
-      </Tooltip>
-      <Modal isOpen={isOpen} onClose={onClose} isCentered>
-        <ModalOverlay />
-        <ModalContent>
-          <ModalHeader>
-            {label || title || t('nodes.unknownNode')}
-          </ModalHeader>
-          <ModalCloseButton />
-          <ModalBody>
-            <NotesTextarea nodeId={nodeId} />
-          </ModalBody>
-          <ModalFooter />
-        </ModalContent>
-      </Modal>
-    </>
+    <Tooltip
+      label={<TooltipContent nodeId={nodeId} />}
+      placement="top"
+      shouldWrapChildren
+    >
+      <Icon
+        as={FaInfoCircle}
+        sx={{ color: needsUpdate ? 'error.400' : 'base.400' }}
+      />
+    </Tooltip>
);
};
-export default memo(InvocationNodeNotes);
+export default memo(InvocationNodeInfoIcon);
const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
const data = useNodeData(nodeId);
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopLeftPanel/TopLeftPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopLeftPanel/TopLeftPanel.tsx
index d355eab348..38aa9bbad7 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopLeftPanel/TopLeftPanel.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopLeftPanel/TopLeftPanel.tsx
@@ -3,15 +3,22 @@ import { useAppDispatch } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { addNodePopoverOpened } from 'features/nodes/store/nodesSlice';
import { memo, useCallback } from 'react';
-import { FaPlus } from 'react-icons/fa';
+import { FaPlus, FaSync } from 'react-icons/fa';
import { useTranslation } from 'react-i18next';
+import IAIButton from 'common/components/IAIButton';
+import { useGetNodesNeedUpdate } from 'features/nodes/hooks/useGetNodesNeedUpdate';
+import { updateAllNodesRequested } from 'features/nodes/store/actions';
const TopLeftPanel = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
+ const nodesNeedUpdate = useGetNodesNeedUpdate();
const handleOpenAddNodePopover = useCallback(() => {
dispatch(addNodePopoverOpened());
}, [dispatch]);
+ const handleClickUpdateNodes = useCallback(() => {
+ dispatch(updateAllNodesRequested());
+ }, [dispatch]);
return (
@@ -21,6 +28,11 @@ const TopLeftPanel = () => {
        icon={<FaPlus />}
onClick={handleOpenAddNodePopover}
/>
+ {nodesNeedUpdate && (
+        <IAIButton leftIcon={<FaSync />} onClick={handleClickUpdateNodes}>
+ {t('nodes.updateAllNodes')}
+        </IAIButton>
+ )}
);
};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx
new file mode 100644
index 0000000000..a627f33f24
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx
@@ -0,0 +1,125 @@
+import {
+ Box,
+ Flex,
+ FormControl,
+ FormLabel,
+ HStack,
+ Text,
+} from '@chakra-ui/react';
+import { createSelector } from '@reduxjs/toolkit';
+import { stateSelector } from 'app/store/store';
+import { useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import IAIIconButton from 'common/components/IAIIconButton';
+import { IAINoContentFallback } from 'common/components/IAIImageFallback';
+import { useNodeVersion } from 'features/nodes/hooks/useNodeVersion';
+import {
+ InvocationNodeData,
+ InvocationTemplate,
+ isInvocationNode,
+} from 'features/nodes/types/types';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { FaSync } from 'react-icons/fa';
+import { Node } from 'reactflow';
+import NotesTextarea from '../../flow/nodes/Invocation/NotesTextarea';
+import ScrollableContent from '../ScrollableContent';
+import EditableNodeTitle from './details/EditableNodeTitle';
+
+const selector = createSelector(
+ stateSelector,
+ ({ nodes }) => {
+ const lastSelectedNodeId =
+ nodes.selectedNodes[nodes.selectedNodes.length - 1];
+
+ const lastSelectedNode = nodes.nodes.find(
+ (node) => node.id === lastSelectedNodeId
+ );
+
+ const lastSelectedNodeTemplate = lastSelectedNode
+ ? nodes.nodeTemplates[lastSelectedNode.data.type]
+ : undefined;
+
+ return {
+ node: lastSelectedNode,
+ template: lastSelectedNodeTemplate,
+ };
+ },
+ defaultSelectorOptions
+);
+
+const InspectorDetailsTab = () => {
+ const { node, template } = useAppSelector(selector);
+ const { t } = useTranslation();
+
+ if (!template || !isInvocationNode(node)) {
+ return (
+      <IAINoContentFallback label={t('nodes.noNodeSelected')} icon={null} />
+ );
+ }
+
+  return <Content node={node} template={template} />;
+};
+
+export default memo(InspectorDetailsTab);
+
+const Content = (props: {
+  node: Node<InvocationNodeData>;
+ template: InvocationTemplate;
+}) => {
+ const { t } = useTranslation();
+ const { needsUpdate, updateNode } = useNodeVersion(props.node.id);
+  return (
+    <Box sx={{ position: 'relative', w: 'full', h: 'full' }}>
+      <ScrollableContent>
+        <Flex sx={{ flexDir: 'column', position: 'relative', p: 1, gap: 2, w: 'full' }}>
+          <EditableNodeTitle nodeId={props.node.data.id} />
+          <HStack>
+            <FormControl>
+              <FormLabel>Node Type</FormLabel>
+              <Text fontSize="sm" fontWeight={600}>
+                {props.template.title}
+              </Text>
+            </FormControl>
+            <Flex flexDir="row" alignItems="center" gap={2}>
+              <FormControl isInvalid={needsUpdate}>
+                <FormLabel>Node Version</FormLabel>
+                <Text fontSize="sm" fontWeight={600}>
+                  {props.node.data.version}
+                </Text>
+              </FormControl>
+              {needsUpdate && (
+                <IAIIconButton
+                  aria-label={t('nodes.updateNode')}
+                  tooltip={t('nodes.updateNode')}
+                  icon={<FaSync />}
+                  onClick={updateNode}
+                />
+              )}
+            </Flex>
+          </HStack>
+          <NotesTextarea nodeId={props.node.data.id} />
+        </Flex>
+      </ScrollableContent>
+    </Box>
+  );
+};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorPanel.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorPanel.tsx
index 9b13cf9e1c..e3dc6645c5 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorPanel.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorPanel.tsx
@@ -10,7 +10,7 @@ import { memo } from 'react';
import InspectorDataTab from './InspectorDataTab';
import InspectorOutputsTab from './InspectorOutputsTab';
import InspectorTemplateTab from './InspectorTemplateTab';
-// import InspectorDetailsTab from './InspectorDetailsTab';
+import InspectorDetailsTab from './InspectorDetailsTab';
const InspectorPanel = () => {
return (
@@ -30,16 +30,16 @@ const InspectorPanel = () => {
sx={{ display: 'flex', flexDir: 'column', w: 'full', h: 'full' }}
>
-          {/* <Tab>Details</Tab> */}
+          <Tab>Details</Tab>
           <Tab>Outputs</Tab>
           <Tab>Data</Tab>
           <Tab>Template</Tab>
-          {/* <TabPanel>
+          <TabPanel>
             <InspectorDetailsTab />
-          </TabPanel> */}
+          </TabPanel>
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/details/EditableNodeTitle.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/details/EditableNodeTitle.tsx
new file mode 100644
index 0000000000..bf32046c6c
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/details/EditableNodeTitle.tsx
@@ -0,0 +1,74 @@
+import {
+ Editable,
+ EditableInput,
+ EditablePreview,
+ Flex,
+} from '@chakra-ui/react';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { useNodeLabel } from 'features/nodes/hooks/useNodeLabel';
+import { useNodeTemplateTitle } from 'features/nodes/hooks/useNodeTemplateTitle';
+import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
+import { memo, useCallback, useEffect, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+
+type Props = {
+ nodeId: string;
+ title?: string;
+};
+
+const EditableNodeTitle = ({ nodeId, title }: Props) => {
+ const dispatch = useAppDispatch();
+ const label = useNodeLabel(nodeId);
+ const templateTitle = useNodeTemplateTitle(nodeId);
+ const { t } = useTranslation();
+
+ const [localTitle, setLocalTitle] = useState('');
+ const handleSubmit = useCallback(
+ async (newTitle: string) => {
+ dispatch(nodeLabelChanged({ nodeId, label: newTitle }));
+ setLocalTitle(
+ label || title || templateTitle || t('nodes.problemSettingTitle')
+ );
+ },
+ [dispatch, nodeId, title, templateTitle, label, t]
+ );
+
+ const handleChange = useCallback((newTitle: string) => {
+ setLocalTitle(newTitle);
+ }, []);
+
+ useEffect(() => {
+ // Another component may change the title; sync local title with global state
+ setLocalTitle(
+ label || title || templateTitle || t('nodes.problemSettingTitle')
+ );
+ }, [label, templateTitle, title, t]);
+
+  return (
+    <Flex sx={{ overflow: 'hidden', w: 'full', h: 'full' }}>
+      <Editable
+        value={localTitle}
+        onChange={handleChange}
+        onSubmit={handleSubmit}
+        sx={{ w: 'full' }}
+      >
+        <EditablePreview noOfLines={1} />
+        <EditableInput className="nodrag" />
+      </Editable>
+    </Flex>
+  );
+};
+
+export default memo(EditableNodeTitle);
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useBuildNodeData.ts b/invokeai/frontend/web/src/features/nodes/hooks/useBuildNodeData.ts
index 40c3f029d7..036ce8d44e 100644
--- a/invokeai/frontend/web/src/features/nodes/hooks/useBuildNodeData.ts
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useBuildNodeData.ts
@@ -1,19 +1,10 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
-import { reduce } from 'lodash-es';
import { useCallback } from 'react';
import { Node, useReactFlow } from 'reactflow';
import { AnyInvocationType } from 'services/events/types';
-import { v4 as uuidv4 } from 'uuid';
-import {
- CurrentImageNodeData,
- InputFieldValue,
- InvocationNodeData,
- NotesNodeData,
- OutputFieldValue,
-} from '../types/types';
-import { buildInputFieldValue } from '../util/fieldValueBuilders';
+import { buildNodeData } from '../store/util/buildNodeData';
import { DRAG_HANDLE_CLASSNAME, NODE_WIDTH } from '../types/constants';
const templatesSelector = createSelector(
@@ -26,14 +17,12 @@ export const SHARED_NODE_PROPERTIES: Partial<Node> = {
};
export const useBuildNodeData = () => {
- const invocationTemplates = useAppSelector(templatesSelector);
+ const nodeTemplates = useAppSelector(templatesSelector);
const flow = useReactFlow();
return useCallback(
(type: AnyInvocationType | 'current_image' | 'notes') => {
- const nodeId = uuidv4();
-
let _x = window.innerWidth / 2;
let _y = window.innerHeight / 2;
@@ -47,111 +36,15 @@ export const useBuildNodeData = () => {
_y = rect.height / 2 - NODE_WIDTH / 2;
}
- const { x, y } = flow.project({
+ const position = flow.project({
x: _x,
y: _y,
});
- if (type === 'current_image') {
- const node: Node<CurrentImageNodeData> = {
- ...SHARED_NODE_PROPERTIES,
- id: nodeId,
- type: 'current_image',
- position: { x: x, y: y },
- data: {
- id: nodeId,
- type: 'current_image',
- isOpen: true,
- label: 'Current Image',
- },
- };
+ const template = nodeTemplates[type];
- return node;
- }
-
- if (type === 'notes') {
- const node: Node<NotesNodeData> = {
- ...SHARED_NODE_PROPERTIES,
- id: nodeId,
- type: 'notes',
- position: { x: x, y: y },
- data: {
- id: nodeId,
- isOpen: true,
- label: 'Notes',
- notes: '',
- type: 'notes',
- },
- };
-
- return node;
- }
-
- const template = invocationTemplates[type];
-
- if (template === undefined) {
- console.error(`Unable to find template ${type}.`);
- return;
- }
-
- const inputs = reduce(
- template.inputs,
- (inputsAccumulator, inputTemplate, inputName) => {
- const fieldId = uuidv4();
-
- const inputFieldValue: InputFieldValue = buildInputFieldValue(
- fieldId,
- inputTemplate
- );
-
- inputsAccumulator[inputName] = inputFieldValue;
-
- return inputsAccumulator;
- },
- {} as Record<string, InputFieldValue>
- );
-
- const outputs = reduce(
- template.outputs,
- (outputsAccumulator, outputTemplate, outputName) => {
- const fieldId = uuidv4();
-
- const outputFieldValue: OutputFieldValue = {
- id: fieldId,
- name: outputName,
- type: outputTemplate.type,
- fieldKind: 'output',
- };
-
- outputsAccumulator[outputName] = outputFieldValue;
-
- return outputsAccumulator;
- },
- {} as Record<string, OutputFieldValue>
- );
-
- const invocation: Node<InvocationNodeData> = {
- ...SHARED_NODE_PROPERTIES,
- id: nodeId,
- type: 'invocation',
- position: { x: x, y: y },
- data: {
- id: nodeId,
- type,
- version: template.version,
- label: '',
- notes: '',
- isOpen: true,
- embedWorkflow: false,
- isIntermediate: type === 'save_image' ? false : true,
- inputs,
- outputs,
- useCache: template.useCache,
- },
- };
-
- return invocation;
+ return buildNodeData(type, position, template);
},
- [invocationTemplates, flow]
+ [nodeTemplates, flow]
);
};
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useGetNodesNeedUpdate.ts b/invokeai/frontend/web/src/features/nodes/hooks/useGetNodesNeedUpdate.ts
new file mode 100644
index 0000000000..a413de38ae
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useGetNodesNeedUpdate.ts
@@ -0,0 +1,25 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { stateSelector } from 'app/store/store';
+import { useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import { getNeedsUpdate } from './useNodeVersion';
+
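+// Selector: true if any node in the editor has a version that differs from its installed template.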
+const selector = createSelector(
+ stateSelector,
+ (state) => {
+ const nodes = state.nodes.nodes;
+ const templates = state.nodes.nodeTemplates;
+
+ const needsUpdate = nodes.some((node) => {
+ const template = templates[node.data.type];
+ return getNeedsUpdate(node, template);
+ });
+ return needsUpdate;
+ },
+ defaultSelectorOptions
+);
+
+export const useGetNodesNeedUpdate = () => {
+ const getNeedsUpdate = useAppSelector(selector);
+ return getNeedsUpdate;
+};
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateByType.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateByType.ts
new file mode 100644
index 0000000000..6fd0615563
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useNodeTemplateByType.ts
@@ -0,0 +1,27 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { stateSelector } from 'app/store/store';
+import { useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import { useMemo } from 'react';
+import { AnyInvocationType } from 'services/events/types';
+
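+// Looks up the invocation template for the given node type from the nodes slice.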
+export const useNodeTemplateByType = (
+ type: AnyInvocationType | 'current_image' | 'notes'
+) => {
+ const selector = useMemo(
+ () =>
+ createSelector(
+ stateSelector,
+ ({ nodes }) => {
+ const nodeTemplate = nodes.nodeTemplates[type];
+ return nodeTemplate;
+ },
+ defaultSelectorOptions
+ ),
+ [type]
+ );
+
+ const nodeTemplate = useAppSelector(selector);
+
+ return nodeTemplate;
+};
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useNodeVersion.ts b/invokeai/frontend/web/src/features/nodes/hooks/useNodeVersion.ts
new file mode 100644
index 0000000000..1f213d6481
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useNodeVersion.ts
@@ -0,0 +1,119 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { stateSelector } from 'app/store/store';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import { satisfies } from 'compare-versions';
+import { cloneDeep, defaultsDeep } from 'lodash-es';
+import { useCallback, useMemo } from 'react';
+import { Node } from 'reactflow';
+import { AnyInvocationType } from 'services/events/types';
+import { nodeReplaced } from '../store/nodesSlice';
+import { buildNodeData } from '../store/util/buildNodeData';
+import {
+ InvocationNodeData,
+ InvocationTemplate,
+ NodeData,
+ isInvocationNode,
+ zParsedSemver,
+} from '../types/types';
+import { useAppToaster } from 'app/components/Toaster';
+import { useTranslation } from 'react-i18next';
+
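+// A node needs an update when its stored version differs from the version of its installed template.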
+export const getNeedsUpdate = (
+ node?: Node<NodeData>,
+ template?: InvocationTemplate
+) => {
+ if (!isInvocationNode(node) || !template) {
+ return false;
+ }
+ return node.data.version !== template.version;
+};
+
+export const getMayUpdateNode = (
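+// A node may be auto-updated only when its version satisfies the current template's major version.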
+ node?: Node<NodeData>,
+ template?: InvocationTemplate
+) => {
+ const needsUpdate = getNeedsUpdate(node, template);
+ if (
+ !needsUpdate ||
+ !isInvocationNode(node) ||
+ !template ||
+ !node.data.version
+ ) {
+ return false;
+ }
+ const templateMajor = zParsedSemver.parse(template.version).major;
+
+ return satisfies(node.data.version, `^${templateMajor}`);
+};
+
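+// Returns a copy of the node updated to the current template: the version is bumped and newly
+// added fields are filled from freshly built defaults, while existing field values are preserved.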
+export const updateNode = (
+ node?: Node<NodeData>,
+ template?: InvocationTemplate
+) => {
+ const mayUpdate = getMayUpdateNode(node, template);
+ if (
+ !mayUpdate ||
+ !isInvocationNode(node) ||
+ !template ||
+ !node.data.version
+ ) {
+ return;
+ }
+
+ const defaults = buildNodeData(
+ node.data.type as AnyInvocationType,
+ node.position,
+ template
+ ) as Node<InvocationNodeData>;
+
+ const clone = cloneDeep(node);
+ clone.data.version = template.version;
+ defaultsDeep(clone, defaults);
+ return clone;
+};
+
+export const useNodeVersion = (nodeId: string) => {
+ const dispatch = useAppDispatch();
+ const toast = useAppToaster();
+ const { t } = useTranslation();
+ const selector = useMemo(
+ () =>
+ createSelector(
+ stateSelector,
+ ({ nodes }) => {
+ const node = nodes.nodes.find((node) => node.id === nodeId);
+ const nodeTemplate = nodes.nodeTemplates[node?.data.type ?? ''];
+ return { node, nodeTemplate };
+ },
+ defaultSelectorOptions
+ ),
+ [nodeId]
+ );
+
+ const { node, nodeTemplate } = useAppSelector(selector);
+
+ const needsUpdate = useMemo(
+ () => getNeedsUpdate(node, nodeTemplate),
+ [node, nodeTemplate]
+ );
+
+ const mayUpdate = useMemo(
+ () => getMayUpdateNode(node, nodeTemplate),
+ [node, nodeTemplate]
+ );
+
+ const _updateNode = useCallback(() => {
+ const needsUpdate = getNeedsUpdate(node, nodeTemplate);
+ const updatedNode = updateNode(node, nodeTemplate);
+ if (!updatedNode) {
+ if (needsUpdate) {
+ toast({ title: t('nodes.unableToUpdateNodes', { count: 1 }) });
+ }
+ return;
+ }
+ dispatch(nodeReplaced({ nodeId: updatedNode.id, node: updatedNode }));
+ }, [dispatch, node, nodeTemplate, t, toast]);
+
+ return { needsUpdate, mayUpdate, updateNode: _updateNode };
+};
diff --git a/invokeai/frontend/web/src/features/nodes/store/actions.ts b/invokeai/frontend/web/src/features/nodes/store/actions.ts
index cf7ccf8238..0d75e6934d 100644
--- a/invokeai/frontend/web/src/features/nodes/store/actions.ts
+++ b/invokeai/frontend/web/src/features/nodes/store/actions.ts
@@ -21,3 +21,7 @@ export const isAnyGraphBuilt = isAnyOf(
export const workflowLoadRequested = createAction(
'nodes/workflowLoadRequested'
);
+
+export const updateAllNodesRequested = createAction(
+ 'nodes/updateAllNodesRequested'
+);
diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
index cb8f3b7d28..3acef5978f 100644
--- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
+++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
@@ -149,6 +149,18 @@ const nodesSlice = createSlice({
nodesChanged: (state, action: PayloadAction<NodeChange[]>) => {
state.nodes = applyNodeChanges(action.payload, state.nodes);
},
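+ // Swaps a node out wholesale at its existing index, e.g. after updating it to a newer template version.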
+ nodeReplaced: (
+ state,
+ action: PayloadAction<{ nodeId: string; node: Node }>
+ ) => {
+ const nodeIndex = state.nodes.findIndex(
+ (n) => n.id === action.payload.nodeId
+ );
+ if (nodeIndex < 0) {
+ return;
+ }
+ state.nodes[nodeIndex] = action.payload.node;
+ },
nodeAdded: (
state,
action: PayloadAction<
@@ -1029,6 +1041,7 @@ export const {
mouseOverFieldChanged,
mouseOverNodeChanged,
nodeAdded,
+ nodeReplaced,
nodeEditorReset,
nodeEmbedWorkflowChanged,
nodeExclusivelySelected,
diff --git a/invokeai/frontend/web/src/features/nodes/store/util/buildNodeData.ts b/invokeai/frontend/web/src/features/nodes/store/util/buildNodeData.ts
new file mode 100644
index 0000000000..6cecc8c409
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/store/util/buildNodeData.ts
@@ -0,0 +1,127 @@
+import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
+import {
+ CurrentImageNodeData,
+ InputFieldValue,
+ InvocationNodeData,
+ InvocationTemplate,
+ NotesNodeData,
+ OutputFieldValue,
+} from 'features/nodes/types/types';
+import { buildInputFieldValue } from 'features/nodes/util/fieldValueBuilders';
+import { reduce } from 'lodash-es';
+import { Node, XYPosition } from 'reactflow';
+import { AnyInvocationType } from 'services/events/types';
+import { v4 as uuidv4 } from 'uuid';
+
+export const SHARED_NODE_PROPERTIES: Partial<Node> = {
+ dragHandle: `.${DRAG_HANDLE_CLASSNAME}`,
+};
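+// Builds a new editor node of the given type at the given position. Invocation nodes require their
+// template; returns undefined if the template is missing.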
+export const buildNodeData = (
+ type: AnyInvocationType | 'current_image' | 'notes',
+ position: XYPosition,
+ template?: InvocationTemplate
+):
+ | Node<CurrentImageNodeData>
+ | Node<NotesNodeData>
+ | Node<InvocationNodeData>
+ | undefined => {
+ const nodeId = uuidv4();
+
+ if (type === 'current_image') {
+ const node: Node<CurrentImageNodeData> = {
+ ...SHARED_NODE_PROPERTIES,
+ id: nodeId,
+ type: 'current_image',
+ position,
+ data: {
+ id: nodeId,
+ type: 'current_image',
+ isOpen: true,
+ label: 'Current Image',
+ },
+ };
+
+ return node;
+ }
+
+ if (type === 'notes') {
+ const node: Node<NotesNodeData> = {
+ ...SHARED_NODE_PROPERTIES,
+ id: nodeId,
+ type: 'notes',
+ position,
+ data: {
+ id: nodeId,
+ isOpen: true,
+ label: 'Notes',
+ notes: '',
+ type: 'notes',
+ },
+ };
+
+ return node;
+ }
+
+ if (template === undefined) {
+ console.error(`Unable to find template ${type}.`);
+ return;
+ }
+
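+ // Build fresh input and output field values from the template's definitions, each with its own uuid.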
+ const inputs = reduce(
+ template.inputs,
+ (inputsAccumulator, inputTemplate, inputName) => {
+ const fieldId = uuidv4();
+
+ const inputFieldValue: InputFieldValue = buildInputFieldValue(
+ fieldId,
+ inputTemplate
+ );
+
+ inputsAccumulator[inputName] = inputFieldValue;
+
+ return inputsAccumulator;
+ },
+ {} as Record<string, InputFieldValue>
+ );
+
+ const outputs = reduce(
+ template.outputs,
+ (outputsAccumulator, outputTemplate, outputName) => {
+ const fieldId = uuidv4();
+
+ const outputFieldValue: OutputFieldValue = {
+ id: fieldId,
+ name: outputName,
+ type: outputTemplate.type,
+ fieldKind: 'output',
+ };
+
+ outputsAccumulator[outputName] = outputFieldValue;
+
+ return outputsAccumulator;
+ },
+ {} as Record<string, OutputFieldValue>
+ );
+
+ const invocation: Node<InvocationNodeData> = {
+ ...SHARED_NODE_PROPERTIES,
+ id: nodeId,
+ type: 'invocation',
+ position,
+ data: {
+ id: nodeId,
+ type,
+ version: template.version,
+ label: '',
+ notes: '',
+ isOpen: true,
+ embedWorkflow: false,
+ isIntermediate: type === 'save_image' ? false : true,
+ inputs,
+ outputs,
+ useCache: template.useCache,
+ },
+ };
+
+ return invocation;
+};
diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts
index f8e48bda5c..81bfed5a1a 100644
--- a/invokeai/frontend/web/src/services/api/schema.d.ts
+++ b/invokeai/frontend/web/src/services/api/schema.d.ts
@@ -85,6 +85,37 @@ export type paths = {
*/
put: operations["merge_models"];
};
+ "/api/v1/model/record/": {
+ /**
+ * List Model Records
+ * @description Get a list of models.
+ */
+ get: operations["list_model_records"];
+ };
+ "/api/v1/model/record/i/{key}": {
+ /**
+ * Get Model Record
+ * @description Get a model record
+ */
+ get: operations["get_model_record"];
+ /**
+ * Del Model Record
+ * @description Delete Model
+ */
+ delete: operations["del_model_record"];
+ /**
+ * Update Model Record
+ * @description Update model contents with a new config. If the model name or base fields are changed, then the model is renamed.
+ */
+ patch: operations["update_model_record"];
+ };
+ "/api/v1/model/record/i/": {
+ /**
+ * Add Model Record
+ * @description Add a model using the configuration information appropriate for its type.
+ */
+ post: operations["add_model_record"];
+ };
"/api/v1/images/upload": {
/**
* Upload Image
@@ -482,11 +513,6 @@ export type components = {
*/
version: string;
};
- /**
- * BaseModelType
- * @enum {string}
- */
- BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner";
/** Batch */
Batch: {
/**
@@ -617,10 +643,10 @@ export type components = {
/**
* @description The color of the image
* @default {
- * "a": 255,
- * "b": 0,
+ * "r": 0,
* "g": 0,
- * "r": 0
+ * "b": 0,
+ * "a": 255
* }
*/
color?: components["schemas"]["ColorField"];
@@ -999,11 +1025,56 @@ export type components = {
*/
type: "clip_output";
};
+ /**
+ * CLIPVisionDiffusersConfig
+ * @description Model config for ClipVision.
+ */
+ CLIPVisionDiffusersConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default clip_vision
+ * @constant
+ */
+ type?: "clip_vision";
+ /**
+ * Format
+ * @constant
+ */
+ format: "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/** CLIPVisionModelDiffusersConfig */
CLIPVisionModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default clip_vision
@@ -1029,7 +1100,7 @@ export type components = {
*/
model_name: string;
/** @description Base model (usually 'Any') */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/**
* CV2 Infill
@@ -1376,10 +1447,10 @@ export type components = {
/**
* @description The color value
* @default {
- * "a": 255,
- * "b": 0,
+ * "r": 0,
* "g": 0,
- * "r": 0
+ * "b": 0,
+ * "a": 255
* }
*/
color?: components["schemas"]["ColorField"];
@@ -1696,6 +1767,103 @@ export type components = {
*/
resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple";
};
+ /**
+ * ControlNetCheckpointConfig
+ * @description Model config for ControlNet models (diffusers version).
+ */
+ ControlNetCheckpointConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default controlnet
+ * @constant
+ */
+ type?: "controlnet";
+ /**
+ * Format
+ * @default checkpoint
+ * @constant
+ */
+ format?: "checkpoint";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ /**
+ * Config
+ * @description path to the checkpoint model config file
+ */
+ config: string;
+ };
+ /**
+ * ControlNetDiffusersConfig
+ * @description Model config for ControlNet models (diffusers version).
+ */
+ ControlNetDiffusersConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default controlnet
+ * @constant
+ */
+ type?: "controlnet";
+ /**
+ * Format
+ * @default diffusers
+ * @constant
+ */
+ format?: "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/**
* ControlNet
* @description Collects ControlNet info to pass to other nodes
@@ -1765,7 +1933,7 @@ export type components = {
ControlNetModelCheckpointConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default controlnet
@@ -1789,7 +1957,7 @@ export type components = {
ControlNetModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default controlnet
@@ -1818,7 +1986,7 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/**
* ControlOutput
@@ -3048,7 +3216,7 @@ export type components = {
* @description The nodes in this graph
*/
nodes?: {
- [key: string]: components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["TestInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["TestInvocation2"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["TestInvocation3"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["StepParamEasingInvocation"];
+ [key: string]: components["schemas"]["GraphInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["TestInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["TestInvocation2"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["TestInvocation3"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"];
};
/**
* Edges
@@ -3085,7 +3253,7 @@ export type components = {
* @description The results of node executions
*/
results: {
- [key: string]: components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["SDXLModelLoaderOutput"];
+ [key: string]: components["schemas"]["IntegerOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["String2Output"] | components["schemas"]["VAEOutput"];
};
/**
* Errors
@@ -3207,6 +3375,51 @@ export type components = {
*/
type: "hed_image_processor";
};
+ /**
+ * IPAdapterConfig
+ * @description Model config for IP Adaptor format models.
+ */
+ IPAdapterConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default ip_adapter
+ * @constant
+ */
+ type?: "ip_adapter";
+ /**
+ * Format
+ * @constant
+ */
+ format: "invokeai";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/** IPAdapterField */
IPAdapterField: {
/**
@@ -3327,13 +3540,13 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/** IPAdapterModelInvokeAIConfig */
IPAdapterModelInvokeAIConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default ip_adapter
@@ -4415,10 +4628,10 @@ export type components = {
/**
* @description The color to use to infill
* @default {
- * "a": 255,
- * "b": 127,
+ * "r": 127,
* "g": 127,
- * "r": 127
+ * "b": 127,
+ * "a": 255
* }
*/
color?: components["schemas"]["ColorField"];
@@ -5130,6 +5343,51 @@ export type components = {
*/
type: "lineart_image_processor";
};
+ /**
+ * LoRAConfig
+ * @description Model config for LoRA/Lycoris models.
+ */
+ LoRAConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default lora
+ * @constant
+ */
+ type?: "lora";
+ /**
+ * Format
+ * @enum {string}
+ */
+ format: "lycoris" | "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/**
* LoRAMetadataField
* @description LoRA Metadata Field
@@ -5147,7 +5405,7 @@ export type components = {
LoRAModelConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default lora
@@ -5172,7 +5430,7 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/**
* LoRAModelFormat
@@ -5192,9 +5450,9 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description Info to load submodel */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
/** @description Info to load submodel */
submodel?: components["schemas"]["SubModelType"] | null;
/**
@@ -5275,6 +5533,128 @@ export type components = {
*/
type: "lora_loader_output";
};
+ /**
+ * MainCheckpointConfig
+ * @description Model config for main checkpoint models.
+ */
+ MainCheckpointConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default main
+ * @constant
+ */
+ type?: "main";
+ /**
+ * Format
+ * @default checkpoint
+ * @constant
+ */
+ format?: "checkpoint";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ /** Vae */
+ vae?: string | null;
+ /** @default normal */
+ variant?: components["schemas"]["invokeai__backend__model_manager__config__ModelVariantType"];
+ /**
+ * Ztsnr Training
+ * @default false
+ */
+ ztsnr_training?: boolean;
+ /**
+ * Config
+ * @description path to the checkpoint model config file
+ */
+ config: string;
+ };
+ /**
+ * MainDiffusersConfig
+ * @description Model config for main diffusers models.
+ */
+ MainDiffusersConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default main
+ * @constant
+ */
+ type?: "main";
+ /**
+ * Format
+ * @default diffusers
+ * @constant
+ */
+ format?: "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ /** Vae */
+ vae?: string | null;
+ /** @default normal */
+ variant?: components["schemas"]["invokeai__backend__model_manager__config__ModelVariantType"];
+ /**
+ * Ztsnr Training
+ * @default false
+ */
+ ztsnr_training?: boolean;
+ /** @default epsilon */
+ prediction_type?: components["schemas"]["invokeai__backend__model_manager__config__SchedulerPredictionType"];
+ /**
+ * Upcast Attention
+ * @default false
+ */
+ upcast_attention?: boolean;
+ };
/**
* MainModelField
* @description Main model field
@@ -5286,9 +5666,9 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description Model Type */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
};
/**
* Main Model
@@ -5808,9 +6188,9 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description Info to load submodel */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
/** @description Info to load submodel */
submodel?: components["schemas"]["SubModelType"] | null;
};
@@ -5841,21 +6221,6 @@ export type components = {
*/
unet: components["schemas"]["UNetField"];
};
- /**
- * ModelType
- * @enum {string}
- */
- ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter";
- /**
- * ModelVariantType
- * @enum {string}
- */
- ModelVariantType: "normal" | "inpaint" | "depth";
- /** ModelsList */
- ModelsList: {
- /** Models */
- models: (components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[];
- };
/**
* Multiply Integers
* @description Multiplies two numbers
@@ -6141,11 +6506,143 @@ export type components = {
*/
type: "prompt_onnx";
};
+ /**
+ * ONNXSD1Config
+ * @description Model config for ONNX format models based on sd-1.
+ */
+ ONNXSD1Config: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ /**
+ * Base
+ * @default sd-1
+ * @constant
+ */
+ base?: "sd-1";
+ /**
+ * Type
+ * @default onnx
+ * @constant
+ */
+ type?: "onnx";
+ /**
+ * Format
+ * @enum {string}
+ */
+ format: "onnx" | "olive";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ /** Vae */
+ vae?: string | null;
+ /** @default normal */
+ variant?: components["schemas"]["invokeai__backend__model_manager__config__ModelVariantType"];
+ /**
+ * Ztsnr Training
+ * @default false
+ */
+ ztsnr_training?: boolean;
+ /** @default epsilon */
+ prediction_type?: components["schemas"]["invokeai__backend__model_manager__config__SchedulerPredictionType"];
+ /**
+ * Upcast Attention
+ * @default false
+ */
+ upcast_attention?: boolean;
+ };
+ /**
+ * ONNXSD2Config
+ * @description Model config for ONNX format models based on sd-2.
+ */
+ ONNXSD2Config: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ /**
+ * Base
+ * @default sd-2
+ * @constant
+ */
+ base?: "sd-2";
+ /**
+ * Type
+ * @default onnx
+ * @constant
+ */
+ type?: "onnx";
+ /**
+ * Format
+ * @enum {string}
+ */
+ format: "onnx" | "olive";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ /** Vae */
+ vae?: string | null;
+ /** @default normal */
+ variant?: components["schemas"]["invokeai__backend__model_manager__config__ModelVariantType"];
+ /**
+ * Ztsnr Training
+ * @default false
+ */
+ ztsnr_training?: boolean;
+ /** @default v_prediction */
+ prediction_type?: components["schemas"]["invokeai__backend__model_manager__config__SchedulerPredictionType"];
+ /**
+ * Upcast Attention
+ * @default true
+ */
+ upcast_attention?: boolean;
+ };
/** ONNXStableDiffusion1ModelConfig */
ONNXStableDiffusion1ModelConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default onnx
@@ -6162,13 +6659,13 @@ export type components = {
*/
model_format: "onnx";
error?: components["schemas"]["ModelError"] | null;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** ONNXStableDiffusion2ModelConfig */
ONNXStableDiffusion2ModelConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default onnx
@@ -6185,8 +6682,8 @@ export type components = {
*/
model_format: "onnx";
error?: components["schemas"]["ModelError"] | null;
- variant: components["schemas"]["ModelVariantType"];
- prediction_type: components["schemas"]["SchedulerPredictionType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
+ prediction_type: components["schemas"]["invokeai__backend__model_management__models__base__SchedulerPredictionType"];
/** Upcast Attention */
upcast_attention: boolean;
};
@@ -6315,9 +6812,9 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description Model Type */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
};
/**
* ONNX Main Model
@@ -7360,11 +7857,6 @@ export type components = {
*/
type: "scheduler_output";
};
- /**
- * SchedulerPredictionType
- * @enum {string}
- */
- SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
/**
* Seamless
* @description Applies the seamless transformation to the Model UNet and VAE.
@@ -7716,7 +8208,7 @@ export type components = {
StableDiffusion1ModelCheckpointConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7737,13 +8229,13 @@ export type components = {
vae?: string | null;
/** Config */
config: string;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** StableDiffusion1ModelDiffusersConfig */
StableDiffusion1ModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7762,13 +8254,13 @@ export type components = {
error?: components["schemas"]["ModelError"] | null;
/** Vae */
vae?: string | null;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** StableDiffusion2ModelCheckpointConfig */
StableDiffusion2ModelCheckpointConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7789,13 +8281,13 @@ export type components = {
vae?: string | null;
/** Config */
config: string;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** StableDiffusion2ModelDiffusersConfig */
StableDiffusion2ModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7814,13 +8306,13 @@ export type components = {
error?: components["schemas"]["ModelError"] | null;
/** Vae */
vae?: string | null;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** StableDiffusionXLModelCheckpointConfig */
StableDiffusionXLModelCheckpointConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7841,13 +8333,13 @@ export type components = {
vae?: string | null;
/** Config */
config: string;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/** StableDiffusionXLModelDiffusersConfig */
StableDiffusionXLModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default main
@@ -7866,7 +8358,7 @@ export type components = {
error?: components["schemas"]["ModelError"] | null;
/** Vae */
vae?: string | null;
- variant: components["schemas"]["ModelVariantType"];
+ variant: components["schemas"]["invokeai__backend__model_management__models__base__ModelVariantType"];
};
/**
* Step Param Easing
@@ -8463,7 +8955,7 @@ export type components = {
T2IAdapterModelDiffusersConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default t2i_adapter
@@ -8489,7 +8981,7 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/** T2IAdapterOutput */
T2IAdapterOutput: {
@@ -8505,6 +8997,51 @@ export type components = {
*/
type: "t2i_adapter_output";
};
+ /**
+ * T2IConfig
+ * @description Model config for T2I.
+ */
+ T2IConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default t2i_adapter
+ * @constant
+ */
+ type?: "t2i_adapter";
+ /**
+ * Format
+ * @constant
+ */
+ format: "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/** TestInvocation */
TestInvocation: {
/**
@@ -8589,11 +9126,56 @@ export type components = {
*/
type: "test_invocation_3";
};
+ /**
+ * TextualInversionConfig
+ * @description Model config for textual inversion embeddings.
+ */
+ TextualInversionConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default embedding
+ * @constant
+ */
+ type?: "embedding";
+ /**
+ * Format
+ * @enum {string}
+ */
+ format: "embedding_file" | "embedding_folder";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/** TextualInversionModelConfig */
TextualInversionModelConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default embedding
@@ -8709,7 +9291,7 @@ export type components = {
*/
model_name: string;
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
/**
* VAEOutput
@@ -8728,6 +9310,98 @@ export type components = {
*/
type: "vae_output";
};
+ /**
+ * VaeCheckpointConfig
+ * @description Model config for standalone VAE models.
+ */
+ VaeCheckpointConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default vae
+ * @constant
+ */
+ type?: "vae";
+ /**
+ * Format
+ * @default checkpoint
+ * @constant
+ */
+ format?: "checkpoint";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
+ /**
+ * VaeDiffusersConfig
+ * @description Model config for standalone VAE models (diffusers version).
+ */
+ VaeDiffusersConfig: {
+ /** Path */
+ path: string;
+ /** Name */
+ name: string;
+ base: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"];
+ /**
+ * Type
+ * @default vae
+ * @constant
+ */
+ type?: "vae";
+ /**
+ * Format
+ * @default diffusers
+ * @constant
+ */
+ format?: "diffusers";
+ /**
+ * Key
+ * @description unique key for model
+ * @default
+ */
+ key?: string;
+ /**
+ * Original Hash
+ * @description original fasthash of model contents
+ */
+ original_hash?: string | null;
+ /**
+ * Current Hash
+ * @description current fasthash of model contents
+ */
+ current_hash?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Source
+ * @description Model download source (URL or repo_id)
+ */
+ source?: string | null;
+ };
/** VaeField */
VaeField: {
/** @description Info to load vae submodel */
@@ -8776,7 +9450,7 @@ export type components = {
VaeModelConfig: {
/** Model Name */
model_name: string;
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/**
* Model Type
* @default vae
@@ -8845,6 +9519,63 @@ export type components = {
*/
type: "zoe_depth_image_processor";
};
+ /**
+ * ModelsList
+ * @description Return list of configs.
+ */
+ invokeai__app__api__routers__model_records__ModelsList: {
+ /** Models */
+ models: ((components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"])[];
+ };
+ /** ModelsList */
+ invokeai__app__api__routers__models__ModelsList: {
+ /** Models */
+ models: (components["schemas"]["ONNXStableDiffusion1ModelConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelCheckpointConfig"] | components["schemas"]["ControlNetModelDiffusersConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["IPAdapterModelInvokeAIConfig"] | components["schemas"]["CLIPVisionModelDiffusersConfig"] | components["schemas"]["T2IAdapterModelDiffusersConfig"] | components["schemas"]["ONNXStableDiffusion2ModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusionXLModelCheckpointConfig"] | components["schemas"]["StableDiffusionXLModelDiffusersConfig"])[];
+ };
+ /**
+ * BaseModelType
+ * @enum {string}
+ */
+ invokeai__backend__model_management__models__base__BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner";
+ /**
+ * ModelType
+ * @enum {string}
+ */
+ invokeai__backend__model_management__models__base__ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter";
+ /**
+ * ModelVariantType
+ * @enum {string}
+ */
+ invokeai__backend__model_management__models__base__ModelVariantType: "normal" | "inpaint" | "depth";
+ /**
+ * SchedulerPredictionType
+ * @enum {string}
+ */
+ invokeai__backend__model_management__models__base__SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
+ /**
+ * BaseModelType
+ * @description Base model type.
+ * @enum {string}
+ */
+ invokeai__backend__model_manager__config__BaseModelType: "any" | "sd-1" | "sd-2" | "sdxl" | "sdxl-refiner";
+ /**
+ * ModelType
+ * @description Model type.
+ * @enum {string}
+ */
+ invokeai__backend__model_manager__config__ModelType: "onnx" | "main" | "vae" | "lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "t2i_adapter";
+ /**
+ * ModelVariantType
+ * @description Variant type.
+ * @enum {string}
+ */
+ invokeai__backend__model_manager__config__ModelVariantType: "normal" | "inpaint" | "depth";
+ /**
+ * SchedulerPredictionType
+ * @description Scheduler prediction type.
+ * @enum {string}
+ */
+ invokeai__backend__model_manager__config__SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
/**
* Input
* @description The type of input a field accepts.
@@ -8934,11 +9665,11 @@ export type components = {
ui_order: number | null;
};
/**
- * IPAdapterModelFormat
+ * StableDiffusionXLModelFormat
* @description An enumeration.
* @enum {string}
*/
- IPAdapterModelFormat: "invokeai";
+ StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusionOnnxModelFormat
* @description An enumeration.
@@ -8951,36 +9682,36 @@ export type components = {
* @enum {string}
*/
T2IAdapterModelFormat: "diffusers";
- /**
- * CLIPVisionModelFormat
- * @description An enumeration.
- * @enum {string}
- */
- CLIPVisionModelFormat: "diffusers";
- /**
- * StableDiffusionXLModelFormat
- * @description An enumeration.
- * @enum {string}
- */
- StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion1ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
- /**
- * ControlNetModelFormat
- * @description An enumeration.
- * @enum {string}
- */
- ControlNetModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion2ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
+ /**
+ * CLIPVisionModelFormat
+ * @description An enumeration.
+ * @enum {string}
+ */
+ CLIPVisionModelFormat: "diffusers";
+ /**
+ * IPAdapterModelFormat
+ * @description An enumeration.
+ * @enum {string}
+ */
+ IPAdapterModelFormat: "invokeai";
+ /**
+ * ControlNetModelFormat
+ * @description An enumeration.
+ * @enum {string}
+ */
+ ControlNetModelFormat: "checkpoint" | "diffusers";
};
responses: never;
parameters: never;
@@ -9058,16 +9789,16 @@ export type operations = {
parameters: {
query?: {
/** @description Base models to include */
- base_models?: components["schemas"]["BaseModelType"][] | null;
+ base_models?: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"][] | null;
/** @description The type of model to get */
- model_type?: components["schemas"]["ModelType"] | null;
+ model_type?: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"] | null;
};
};
responses: {
/** @description Successful Response */
200: {
content: {
- "application/json": components["schemas"]["ModelsList"];
+ "application/json": components["schemas"]["invokeai__app__api__routers__models__ModelsList"];
};
};
/** @description Validation Error */
@@ -9086,9 +9817,9 @@ export type operations = {
parameters: {
path: {
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description The type of model */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
/** @description model name */
model_name: string;
};
@@ -9118,9 +9849,9 @@ export type operations = {
parameters: {
path: {
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description The type of model */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
/** @description model name */
model_name: string;
};
@@ -9247,9 +9978,9 @@ export type operations = {
};
path: {
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
/** @description The type of model */
- model_type: components["schemas"]["ModelType"];
+ model_type: components["schemas"]["invokeai__backend__model_management__models__base__ModelType"];
/** @description model name */
model_name: string;
};
@@ -9341,7 +10072,7 @@ export type operations = {
parameters: {
path: {
/** @description Base model */
- base_model: components["schemas"]["BaseModelType"];
+ base_model: components["schemas"]["invokeai__backend__model_management__models__base__BaseModelType"];
};
};
requestBody: {
@@ -9372,6 +10103,172 @@ export type operations = {
};
};
};
+ /**
+ * List Model Records
+ * @description Get a list of models.
+ */
+ list_model_records: {
+ parameters: {
+ query?: {
+ /** @description Base models to include */
+ base_models?: components["schemas"]["invokeai__backend__model_manager__config__BaseModelType"][] | null;
+ /** @description The type of model to get */
+ model_type?: components["schemas"]["invokeai__backend__model_manager__config__ModelType"] | null;
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ content: {
+ "application/json": components["schemas"]["invokeai__app__api__routers__model_records__ModelsList"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ /**
+ * Get Model Record
+ * @description Get a model record
+ */
+ get_model_record: {
+ parameters: {
+ path: {
+ /** @description Key of the model record to fetch. */
+ key: string;
+ };
+ };
+ responses: {
+ /** @description Success */
+ 200: {
+ content: {
+ "application/json": (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"];
+ };
+ };
+ /** @description Bad request */
+ 400: {
+ content: never;
+ };
+ /** @description The model could not be found */
+ 404: {
+ content: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ /**
+ * Del Model Record
+ * @description Delete Model
+ */
+ del_model_record: {
+ parameters: {
+ path: {
+ /** @description Unique key of model to remove from model registry. */
+ key: string;
+ };
+ };
+ responses: {
+ /** @description Model deleted successfully */
+ 204: {
+ content: never;
+ };
+ /** @description Model not found */
+ 404: {
+ content: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ /**
+ * Update Model Record
+ * @description Update model contents with a new config. If the model name or base fields are changed, then the model is renamed.
+ */
+ update_model_record: {
+ parameters: {
+ path: {
+ /** @description Unique key of model */
+ key: string;
+ };
+ };
+ requestBody: {
+ content: {
+ "application/json": (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"];
+ };
+ };
+ responses: {
+ /** @description The model was updated successfully */
+ 200: {
+ content: {
+ "application/json": (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"];
+ };
+ };
+ /** @description Bad request */
+ 400: {
+ content: never;
+ };
+ /** @description The model could not be found */
+ 404: {
+ content: never;
+ };
+ /** @description There is already a model corresponding to the new name */
+ 409: {
+ content: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ /**
+ * Add Model Record
+ * @description Add a model using the configuration information appropriate for its type.
+ */
+ add_model_record: {
+ requestBody: {
+ content: {
+ "application/json": (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"];
+ };
+ };
+ responses: {
+ /** @description The model added successfully */
+ 201: {
+ content: {
+ "application/json": (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"]) | (components["schemas"]["ONNXSD1Config"] | components["schemas"]["ONNXSD2Config"]) | (components["schemas"]["VaeDiffusersConfig"] | components["schemas"]["VaeCheckpointConfig"]) | (components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"]) | components["schemas"]["LoRAConfig"] | components["schemas"]["TextualInversionConfig"] | components["schemas"]["IPAdapterConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["T2IConfig"];
+ };
+ };
+ /** @description There is already a model corresponding to this path or repo_id */
+ 409: {
+ content: never;
+ };
+ /** @description Unrecognized file/folder format */
+ 415: {
+ content: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
/**
* Upload Image
* @description Uploads an image
diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts
index 526a4d6601..9038374912 100644
--- a/invokeai/frontend/web/src/services/api/types.ts
+++ b/invokeai/frontend/web/src/services/api/types.ts
@@ -48,9 +48,11 @@ export type OffsetPaginatedResults_ImageDTO_ =
s['OffsetPaginatedResults_ImageDTO_'];
// Models
-export type ModelType = s['ModelType'];
+export type ModelType =
+ s['invokeai__backend__model_management__models__base__ModelType'];
export type SubModelType = s['SubModelType'];
-export type BaseModelType = s['BaseModelType'];
+export type BaseModelType =
+ s['invokeai__backend__model_management__models__base__BaseModelType'];
export type MainModelField = s['MainModelField'];
export type OnnxModelField = s['OnnxModelField'];
export type VAEModelField = s['VAEModelField'];
@@ -58,7 +60,7 @@ export type LoRAModelField = s['LoRAModelField'];
export type ControlNetModelField = s['ControlNetModelField'];
export type IPAdapterModelField = s['IPAdapterModelField'];
export type T2IAdapterModelField = s['T2IAdapterModelField'];
-export type ModelsList = s['ModelsList'];
+export type ModelsList = s['invokeai__app__api__routers__models__ModelsList'];
export type ControlField = s['ControlField'];
export type IPAdapterField = s['IPAdapterField'];