diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 79a46cb7a1..e9793b9424 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -167,13 +167,13 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard): title="Canny Processor", tags=["controlnet", "canny"], category="controlnet", - version="1.3.2", + version="1.3.3", ) class CannyImageProcessorInvocation(ImageProcessorInvocation): """Canny edge detection for ControlNet""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) low_threshold: int = InputField( default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)" ) @@ -201,13 +201,13 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation): title="HED (softedge) Processor", tags=["controlnet", "hed", "softedge"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class HedImageProcessorInvocation(ImageProcessorInvocation): """Applies HED edge detection to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) # safe not supported in controlnet_aux v0.0.3 # safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode) scribble: bool = 
InputField(default=False, description=FieldDescriptions.scribble_mode) @@ -230,13 +230,13 @@ class HedImageProcessorInvocation(ImageProcessorInvocation): title="Lineart Processor", tags=["controlnet", "lineart"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class LineartImageProcessorInvocation(ImageProcessorInvocation): """Applies line art processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) coarse: bool = InputField(default=False, description="Whether to use coarse mode") def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: @@ -252,13 +252,13 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation): title="Lineart Anime Processor", tags=["controlnet", "lineart", "anime"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation): """Applies line art anime processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: processor = LineartAnimeProcessor() @@ -275,15 +275,15 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation): title="Midas Depth Processor", tags=["controlnet", "midas"], category="controlnet", - 
version="1.2.3", + version="1.2.4", ) class MidasDepthImageProcessorInvocation(ImageProcessorInvocation): """Applies Midas depth processing to image""" a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)") bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`") - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) # depth_and_normal not supported in controlnet_aux v0.0.3 # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode") @@ -307,13 +307,13 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation): title="Normal BAE Processor", tags=["controlnet"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class NormalbaeImageProcessorInvocation(ImageProcessorInvocation): """Applies NormalBae processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators") @@ -324,13 +324,13 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation): @invocation( - "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2" + 
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3" ) class MlsdImageProcessorInvocation(ImageProcessorInvocation): """Applies MLSD processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`") thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`") @@ -347,13 +347,13 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation): @invocation( - "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2" + "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3" ) class PidiImageProcessorInvocation(ImageProcessorInvocation): """Applies PIDI processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode) scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode) @@ -374,13 +374,13 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation): title="Content Shuffle Processor", tags=["controlnet", "contentshuffle"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class 
ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): """Applies content shuffle processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter") w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter") f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter") @@ -404,7 +404,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): title="Zoe (Depth) Processor", tags=["controlnet", "zoe", "depth"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation): """Applies Zoe depth processing to image""" @@ -420,15 +420,15 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation): title="Mediapipe Face Processor", tags=["controlnet", "mediapipe", "face"], category="controlnet", - version="1.2.3", + version="1.2.4", ) class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): """Applies mediapipe face processing to image""" max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect") min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection") - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, 
description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: mediapipe_face_processor = MediapipeFaceDetector() @@ -447,7 +447,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): title="Leres (Depth) Processor", tags=["controlnet", "leres", "depth"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class LeresImageProcessorInvocation(ImageProcessorInvocation): """Applies leres processing to image""" @@ -455,8 +455,8 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation): thr_a: float = InputField(default=0, description="Leres parameter `thr_a`") thr_b: float = InputField(default=0, description="Leres parameter `thr_b`") boost: bool = InputField(default=False, description="Whether to use boost mode") - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") @@ -476,7 +476,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation): title="Tile Resample Processor", tags=["controlnet", "tile"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class TileResamplerProcessorInvocation(ImageProcessorInvocation): """Tile resampler processor""" @@ -516,13 +516,13 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation): title="Segment Anything Processor", tags=["controlnet", "segmentanything"], category="controlnet", - version="1.2.3", + version="1.2.4", ) class SegmentAnythingProcessorInvocation(ImageProcessorInvocation): """Applies segment 
anything processing to image""" - detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") @@ -563,12 +563,12 @@ class SamDetectorReproducibleColors(SamDetector): title="Color Map Processor", tags=["controlnet"], category="controlnet", - version="1.2.2", + version="1.2.3", ) class ColorMapImageProcessorInvocation(ImageProcessorInvocation): """Generates a color map from the provided image""" - color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size) + color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: np_image = np.array(image, dtype=np.uint8) @@ -595,7 +595,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"] title="Depth Anything Processor", tags=["controlnet", "depth", "depth anything"], category="controlnet", - version="1.1.1", + version="1.1.2", ) class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): """Generates a depth map based on the Depth Anything algorithm""" @@ -603,7 +603,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField( default="small", description="The size of the depth model to use" ) - resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res) + resolution: int = InputField(default=512, ge=1, 
description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: def loader(model_path: Path): @@ -622,7 +622,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): title="DW Openpose Image Processor", tags=["controlnet", "dwpose", "openpose"], category="controlnet", - version="1.1.0", + version="1.1.1", ) class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation): """Generates an openpose pose from an image using DWPose""" @@ -630,7 +630,7 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation): draw_body: bool = InputField(default=True) draw_face: bool = InputField(default=False) draw_hands: bool = InputField(default=False) - image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) + image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res) def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image: dw_openpose = DWOpenposeDetector(context) @@ -649,15 +649,15 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation): title="Heuristic Resize", tags=["image, controlnet"], category="image", - version="1.0.0", + version="1.0.1", classification=Classification.Prototype, ) class HeuristicResizeInvocation(BaseInvocation): """Resize an image using a heuristic method. 
Preserves edge maps.""" image: ImageField = InputField(description="The image to resize") - width: int = InputField(default=512, gt=0, description="The width to resize to (px)") - height: int = InputField(default=512, gt=0, description="The height to resize to (px)") + width: int = InputField(default=512, ge=1, description="The width to resize to (px)") + height: int = InputField(default=512, ge=1, description="The height to resize to (px)") def invoke(self, context: InvocationContext) -> ImageOutput: image = context.images.get_pil(self.image.image_name, "RGB") diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 4ad63f4f89..3d1439f7db 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -3,7 +3,7 @@ import inspect import math from contextlib import ExitStack from functools import singledispatchmethod -from typing import Any, Iterator, List, Literal, Optional, Tuple, Union +from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union import einops import numpy as np @@ -11,7 +11,6 @@ import numpy.typing as npt import torch import torchvision import torchvision.transforms as T -from diffusers import AutoencoderKL, AutoencoderTiny from diffusers.configuration_utils import ConfigMixin from diffusers.image_processor import VaeImageProcessor from diffusers.models.adapter import T2IAdapter @@ -21,9 +20,12 @@ from diffusers.models.attention_processor import ( LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) +from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL +from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel -from diffusers.schedulers import DPMSolverSDEScheduler -from diffusers.schedulers import SchedulerMixin as Scheduler +from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler +from diffusers.schedulers.scheduling_tcd import 
TCDScheduler +from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler from PIL import Image, ImageFilter from pydantic import field_validator from torchvision.transforms.functional import resize as tv_resize @@ -521,9 +523,10 @@ class DenoiseLatentsInvocation(BaseInvocation): ) if is_sdxl: - return SDXLConditioningInfo( - embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids - ), regions + return ( + SDXLConditioningInfo(embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids), + regions, + ) return BasicConditioningInfo(embeds=text_embedding), regions def get_conditioning_data( @@ -825,7 +828,7 @@ class DenoiseLatentsInvocation(BaseInvocation): denoising_start: float, denoising_end: float, seed: int, - ) -> Tuple[int, List[int], int]: + ) -> Tuple[int, List[int], int, Dict[str, Any]]: assert isinstance(scheduler, ConfigMixin) if scheduler.config.get("cpu_only", False): scheduler.set_timesteps(steps, device="cpu") @@ -853,13 +856,15 @@ class DenoiseLatentsInvocation(BaseInvocation): timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx] num_inference_steps = len(timesteps) // scheduler.order - scheduler_step_kwargs = {} + scheduler_step_kwargs: Dict[str, Any] = {} scheduler_step_signature = inspect.signature(scheduler.step) if "generator" in scheduler_step_signature.parameters: # At some point, someone decided that schedulers that accept a generator should use the original seed with # all bits flipped. I don't know the original rationale for this, but now we must keep it like this for # reproducibility. 
- scheduler_step_kwargs = {"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)} + scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)}) + if isinstance(scheduler, TCDScheduler): + scheduler_step_kwargs.update({"eta": 1.0}) return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py index c824d94dca..3a55d52d4a 100644 --- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py +++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py @@ -13,6 +13,7 @@ from diffusers import ( LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, + TCDScheduler, UniPCMultistepScheduler, ) @@ -40,4 +41,5 @@ SCHEDULER_MAP = { "dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}), "unipc": (UniPCMultistepScheduler, {"cpu_only": True}), "lcm": (LCMScheduler, {}), + "tcd": (TCDScheduler, {}), } diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index 9e661e0737..2730367fe5 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -25,7 +25,7 @@ "typegen": "node scripts/typegen.js", "preview": "vite preview", "lint:knip": "knip", - "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:0 src/main.tsx", + "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx", "lint:eslint": "eslint --max-warnings=0 .", "lint:prettier": "prettier --check .", "lint:tsc": "tsc --noEmit", diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 885a937de3..c80283b664 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -225,7 +225,7 @@ "composition": "Composition Only", "safe": "Safe", "saveControlImage": "Save 
Control Image", - "scribble": "scribble", + "scribble": "Scribble", "selectModel": "Select a model", "selectCLIPVisionModel": "Select a CLIP Vision model", "setControlImageDimensions": "Copy size to W/H (optimize for model)", @@ -917,6 +917,7 @@ "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input", "missingNodeTemplate": "Missing node template", "noControlImageForControlAdapter": "Control Adapter #{{number}} has no control image", + "imageNotProcessedForControlAdapter": "Control Adapter #{{number}}'s image is not processed", "noInitialImageSelected": "No initial image selected", "noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.", "incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.", @@ -1542,6 +1543,8 @@ "globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)", "globalIPAdapter": "Global $t(common.ipAdapter)", "globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)", - "opacityFilter": "Opacity Filter" + "opacityFilter": "Opacity Filter", + "clearProcessor": "Clear Processor", + "resetProcessor": "Reset Processor to Defaults" } } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index ac039c2df6..36040b5e41 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -16,7 +16,7 @@ import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listen import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet'; import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged'; import { addCanvasSavedToGalleryListener } from 
'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery'; -import { addControlLayersToControlAdapterBridge } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; +import { addControlAdapterPreprocessor } from 'app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor'; import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess'; import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed'; import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas'; @@ -158,5 +158,4 @@ addUpscaleRequestedListener(startAppListening); addDynamicPromptsListener(startAppListening); addSetDefaultSettingsListener(startAppListening); - -addControlLayersToControlAdapterBridge(startAppListening); +addControlAdapterPreprocessor(startAppListening); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts new file mode 100644 index 0000000000..50395dc9dc --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor.ts @@ -0,0 +1,156 @@ +import { isAnyOf } from '@reduxjs/toolkit'; +import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; +import { parseify } from 'common/util/serialize'; +import { + caLayerImageChanged, + caLayerIsProcessingImageChanged, + caLayerModelChanged, + caLayerProcessedImageChanged, + caLayerProcessorConfigChanged, + isControlAdapterLayer, +} from 'features/controlLayers/store/controlLayersSlice'; +import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { isImageOutput 
} from 'features/nodes/types/common'; +import { addToast } from 'features/system/store/systemSlice'; +import { t } from 'i18next'; +import { isEqual } from 'lodash-es'; +import { imagesApi } from 'services/api/endpoints/images'; +import { queueApi } from 'services/api/endpoints/queue'; +import type { BatchConfig, ImageDTO } from 'services/api/types'; +import { socketInvocationComplete } from 'services/events/actions'; + +const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged); + +const DEBOUNCE_MS = 300; +const log = logger('session'); + +export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => { + startAppListening({ + matcher, + effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take }) => { + const { layerId } = action.payload; + const precheckLayerOriginal = getOriginalState() + .controlLayers.present.layers.filter(isControlAdapterLayer) + .find((l) => l.id === layerId); + const precheckLayer = getState() + .controlLayers.present.layers.filter(isControlAdapterLayer) + .find((l) => l.id === layerId); + + // Conditions to bail + const layerDoesNotExist = !precheckLayer; + const layerHasNoImage = !precheckLayer?.controlAdapter.image; + const layerHasNoProcessorConfig = !precheckLayer?.controlAdapter.processorConfig; + const layerIsAlreadyProcessingImage = precheckLayer?.controlAdapter.isProcessingImage; + const areImageAndProcessorUnchanged = + isEqual(precheckLayer?.controlAdapter.image, precheckLayerOriginal?.controlAdapter.image) && + isEqual(precheckLayer?.controlAdapter.processorConfig, precheckLayerOriginal?.controlAdapter.processorConfig); + + if ( + layerDoesNotExist || + layerHasNoImage || + layerHasNoProcessorConfig || + areImageAndProcessorUnchanged || + layerIsAlreadyProcessingImage + ) { + return; + } + + // Cancel any in-progress instances of this listener + cancelActiveListeners(); + log.trace('Control Layer CA auto-process triggered'); + 
+ // Delay before starting actual work + await delay(DEBOUNCE_MS); + dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: true })); + + // Double-check that we are still eligible for processing + const state = getState(); + const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId); + const image = layer?.controlAdapter.image; + const config = layer?.controlAdapter.processorConfig; + + // If we have no image or there is no processor config, bail + if (!layer || !image || !config) { + return; + } + + // @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error... + const processorNode = CONTROLNET_PROCESSORS[config.type].buildNode(image, config); + const enqueueBatchArg: BatchConfig = { + prepend: true, + batch: { + graph: { + nodes: { + [processorNode.id]: { ...processorNode, is_intermediate: true }, + }, + edges: [], + }, + runs: 1, + }, + }; + + try { + const req = dispatch( + queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, { + fixedCacheKey: 'enqueueBatch', + }) + ); + const enqueueResult = await req.unwrap(); + req.reset(); + log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued')); + + const [invocationCompleteAction] = await take( + (action): action is ReturnType => + socketInvocationComplete.match(action) && + action.payload.data.queue_batch_id === enqueueResult.batch.batch_id && + action.payload.data.source_node_id === processorNode.id + ); + + // We still have to check the output type + if (isImageOutput(invocationCompleteAction.payload.data.result)) { + const { image_name } = invocationCompleteAction.payload.data.result.image; + + // Wait for the ImageDTO to be received + const [{ payload }] = await take( + (action) => + imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name + ); + + const imageDTO = payload as ImageDTO; + + log.debug({ layerId, imageDTO }, 'ControlNet image 
processed'); + + // Update the processed image in the store + dispatch( + caLayerProcessedImageChanged({ + layerId, + imageDTO, + }) + ); + dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false })); + } + } catch (error) { + console.log(error); + log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue')); + dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false })); + + if (error instanceof Object) { + if ('data' in error && 'status' in error) { + if (error.status === 403) { + dispatch(caLayerImageChanged({ layerId, imageDTO: null })); + return; + } + } + } + + dispatch( + addToast({ + title: t('queue.graphFailedToQueue'), + status: 'error', + }) + ); + } + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts deleted file mode 100644 index bc14277f88..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { createAction } from '@reduxjs/toolkit'; -import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; -import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants'; -import { controlAdapterAdded, controlAdapterRemoved } from 'features/controlAdapters/store/controlAdaptersSlice'; -import type { ControlNetConfig, IPAdapterConfig } from 'features/controlAdapters/store/types'; -import { isControlAdapterProcessorType } from 'features/controlAdapters/store/types'; -import { - controlAdapterLayerAdded, - ipAdapterLayerAdded, - layerDeleted, - maskLayerIPAdapterAdded, - maskLayerIPAdapterDeleted, - regionalGuidanceLayerAdded, -} from 'features/controlLayers/store/controlLayersSlice'; -import type { Layer } from 
'features/controlLayers/store/types'; -import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models'; -import { isControlNetModelConfig, isIPAdapterModelConfig } from 'services/api/types'; -import { assert } from 'tsafe'; -import { v4 as uuidv4 } from 'uuid'; - -export const guidanceLayerAdded = createAction('controlLayers/guidanceLayerAdded'); -export const guidanceLayerDeleted = createAction('controlLayers/guidanceLayerDeleted'); -export const allLayersDeleted = createAction('controlLayers/allLayersDeleted'); -export const guidanceLayerIPAdapterAdded = createAction('controlLayers/guidanceLayerIPAdapterAdded'); -export const guidanceLayerIPAdapterDeleted = createAction<{ layerId: string; ipAdapterId: string }>( - 'controlLayers/guidanceLayerIPAdapterDeleted' -); - -export const addControlLayersToControlAdapterBridge = (startAppListening: AppStartListening) => { - startAppListening({ - actionCreator: guidanceLayerAdded, - effect: (action, { dispatch, getState }) => { - const type = action.payload; - const layerId = uuidv4(); - if (type === 'regional_guidance_layer') { - dispatch(regionalGuidanceLayerAdded({ layerId })); - return; - } - - const state = getState(); - const baseModel = state.generation.model?.base; - const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data; - - if (type === 'ip_adapter_layer') { - const ipAdapterId = uuidv4(); - const overrides: Partial = { - id: ipAdapterId, - }; - - // Find and select the first matching model - if (modelConfigs) { - const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig); - overrides.model = models.find((m) => m.base === baseModel) ?? 
null; - } - dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides })); - dispatch(ipAdapterLayerAdded({ layerId, ipAdapterId })); - return; - } - - if (type === 'control_adapter_layer') { - const controlNetId = uuidv4(); - const overrides: Partial = { - id: controlNetId, - }; - - // Find and select the first matching model - if (modelConfigs) { - const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isControlNetModelConfig); - const model = models.find((m) => m.base === baseModel) ?? null; - overrides.model = model; - const defaultPreprocessor = model?.default_settings?.preprocessor; - overrides.processorType = isControlAdapterProcessorType(defaultPreprocessor) ? defaultPreprocessor : 'none'; - overrides.processorNode = CONTROLNET_PROCESSORS[overrides.processorType].buildDefaults(baseModel); - } - dispatch(controlAdapterAdded({ type: 'controlnet', overrides })); - dispatch(controlAdapterLayerAdded({ layerId, controlNetId })); - return; - } - }, - }); - - startAppListening({ - actionCreator: guidanceLayerDeleted, - effect: (action, { getState, dispatch }) => { - const layerId = action.payload; - const state = getState(); - const layer = state.controlLayers.present.layers.find((l) => l.id === layerId); - assert(layer, `Layer ${layerId} not found`); - - if (layer.type === 'ip_adapter_layer') { - dispatch(controlAdapterRemoved({ id: layer.ipAdapterId })); - } else if (layer.type === 'control_adapter_layer') { - dispatch(controlAdapterRemoved({ id: layer.controlNetId })); - } else if (layer.type === 'regional_guidance_layer') { - for (const ipAdapterId of layer.ipAdapterIds) { - dispatch(controlAdapterRemoved({ id: ipAdapterId })); - } - } - dispatch(layerDeleted(layerId)); - }, - }); - - startAppListening({ - actionCreator: allLayersDeleted, - effect: (action, { dispatch, getOriginalState }) => { - const state = getOriginalState(); - for (const layer of state.controlLayers.present.layers) { - dispatch(guidanceLayerDeleted(layer.id)); - } - 
}, - }); - - startAppListening({ - actionCreator: guidanceLayerIPAdapterAdded, - effect: (action, { dispatch, getState }) => { - const layerId = action.payload; - const ipAdapterId = uuidv4(); - const overrides: Partial = { - id: ipAdapterId, - }; - - // Find and select the first matching model - const state = getState(); - const baseModel = state.generation.model?.base; - const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data; - if (modelConfigs) { - const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig); - overrides.model = models.find((m) => m.base === baseModel) ?? null; - } - - dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides })); - dispatch(maskLayerIPAdapterAdded({ layerId, ipAdapterId })); - }, - }); - - startAppListening({ - actionCreator: guidanceLayerIPAdapterDeleted, - effect: (action, { dispatch }) => { - const { layerId, ipAdapterId } = action.payload; - dispatch(controlAdapterRemoved({ id: ipAdapterId })); - dispatch(maskLayerIPAdapterDeleted({ layerId, ipAdapterId })); - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index 307e3487dd..de2ac3a39a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -7,6 +7,11 @@ import { controlAdapterImageChanged, controlAdapterIsEnabledChanged, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { + caLayerImageChanged, + ipaLayerImageChanged, + rgLayerIPAdapterImageChanged, +} from 'features/controlLayers/store/controlLayersSlice'; import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; import { imageSelected } from 'features/gallery/store/gallerySlice'; import { 
fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; @@ -83,6 +88,61 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) => return; } + /** + * Image dropped on Control Adapter Layer + */ + if ( + overData.actionType === 'SET_CA_LAYER_IMAGE' && + activeData.payloadType === 'IMAGE_DTO' && + activeData.payload.imageDTO + ) { + const { layerId } = overData.context; + dispatch( + caLayerImageChanged({ + layerId, + imageDTO: activeData.payload.imageDTO, + }) + ); + return; + } + + /** + * Image dropped on IP Adapter Layer + */ + if ( + overData.actionType === 'SET_IPA_LAYER_IMAGE' && + activeData.payloadType === 'IMAGE_DTO' && + activeData.payload.imageDTO + ) { + const { layerId } = overData.context; + dispatch( + ipaLayerImageChanged({ + layerId, + imageDTO: activeData.payload.imageDTO, + }) + ); + return; + } + + /** + * Image dropped on RG Layer IP Adapter + */ + if ( + overData.actionType === 'SET_RG_LAYER_IP_ADAPTER_IMAGE' && + activeData.payloadType === 'IMAGE_DTO' && + activeData.payload.imageDTO + ) { + const { layerId, ipAdapterId } = overData.context; + dispatch( + rgLayerIPAdapterImageChanged({ + layerId, + ipAdapterId, + imageDTO: activeData.payload.imageDTO, + }) + ); + return; + } + /** * Image dropped on Canvas */ diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts index a2ca4baeb1..fd568ef1bd 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts @@ -6,6 +6,11 @@ import { controlAdapterImageChanged, controlAdapterIsEnabledChanged, } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { + caLayerImageChanged, + ipaLayerImageChanged, + rgLayerIPAdapterImageChanged, +} from 
'features/controlLayers/store/controlLayersSlice'; import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { addToast } from 'features/system/store/systemSlice'; @@ -108,6 +113,39 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis return; } + if (postUploadAction?.type === 'SET_CA_LAYER_IMAGE') { + const { layerId } = postUploadAction; + dispatch(caLayerImageChanged({ layerId, imageDTO })); + dispatch( + addToast({ + ...DEFAULT_UPLOADED_TOAST, + description: t('toast.setControlImage'), + }) + ); + } + + if (postUploadAction?.type === 'SET_IPA_LAYER_IMAGE') { + const { layerId } = postUploadAction; + dispatch(ipaLayerImageChanged({ layerId, imageDTO })); + dispatch( + addToast({ + ...DEFAULT_UPLOADED_TOAST, + description: t('toast.setControlImage'), + }) + ); + } + + if (postUploadAction?.type === 'SET_RG_LAYER_IP_ADAPTER_IMAGE') { + const { layerId, ipAdapterId } = postUploadAction; + dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO })); + dispatch( + addToast({ + ...DEFAULT_UPLOADED_TOAST, + description: t('toast.setControlImage'), + }) + ); + } + if (postUploadAction?.type === 'SET_INITIAL_IMAGE') { dispatch(initialImageChanged(imageDTO)); dispatch( diff --git a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts index d765e987eb..6073564305 100644 --- a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts +++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts @@ -16,6 +16,7 @@ import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import i18n from 'i18next'; import { forEach } from 'lodash-es'; import { getConnectedEdges } from 'reactflow'; +import { assert } from 'tsafe'; const selector = createMemoizedSelector( [ @@ -97,71 +98,93 @@ const selector = 
createMemoizedSelector( reasons.push(i18n.t('parameters.invoke.noModelSelected')); } - let enabledControlAdapters = selectControlAdapterAll(controlAdapters).filter((ca) => ca.isEnabled); - if (activeTabName === 'txt2img') { - // Special handling for control layers on txt2img - const enabledControlLayersAdapterIds = controlLayers.present.layers + // Handling for Control Layers - only exists on txt2img tab now + controlLayers.present.layers .filter((l) => l.isEnabled) - .flatMap((layer) => { - if (layer.type === 'regional_guidance_layer') { - return layer.ipAdapterIds; + .flatMap((l) => { + if (l.type === 'control_adapter_layer') { + return l.controlAdapter; + } else if (l.type === 'ip_adapter_layer') { + return l.ipAdapter; + } else if (l.type === 'regional_guidance_layer') { + return l.ipAdapters; } - if (layer.type === 'control_adapter_layer') { - return [layer.controlNetId]; + assert(false); + }) + .forEach((ca, i) => { + const hasNoModel = !ca.model; + const mismatchedModelBase = ca.model?.base !== model?.base; + const hasNoImage = !ca.image; + const imageNotProcessed = + (ca.type === 'controlnet' || ca.type === 't2i_adapter') && !ca.processedImage && ca.processorConfig; + + if (hasNoModel) { + reasons.push( + i18n.t('parameters.invoke.noModelForControlAdapter', { + number: i + 1, + }) + ); } - if (layer.type === 'ip_adapter_layer') { - return [layer.ipAdapterId]; + if (mismatchedModelBase) { + // This should never happen, just a sanity check + reasons.push( + i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { + number: i + 1, + }) + ); + } + if (hasNoImage) { + reasons.push( + i18n.t('parameters.invoke.noControlImageForControlAdapter', { + number: i + 1, + }) + ); + } + if (imageNotProcessed) { + reasons.push( + i18n.t('parameters.invoke.imageNotProcessedForControlAdapter', { + number: i + 1, + }) + ); } }); - - enabledControlAdapters = enabledControlAdapters.filter((ca) => enabledControlLayersAdapterIds.includes(ca.id)); } else { - const 
allControlLayerAdapterIds = controlLayers.present.layers.flatMap((layer) => { - if (layer.type === 'regional_guidance_layer') { - return layer.ipAdapterIds; - } - if (layer.type === 'control_adapter_layer') { - return [layer.controlNetId]; - } - if (layer.type === 'ip_adapter_layer') { - return [layer.ipAdapterId]; - } - }); - enabledControlAdapters = enabledControlAdapters.filter((ca) => !allControlLayerAdapterIds.includes(ca.id)); + // Handling for all other tabs + selectControlAdapterAll(controlAdapters) + .filter((ca) => ca.isEnabled) + .forEach((ca, i) => { + if (!ca.isEnabled) { + return; + } + + if (!ca.model) { + reasons.push( + i18n.t('parameters.invoke.noModelForControlAdapter', { + number: i + 1, + }) + ); + } else if (ca.model.base !== model?.base) { + // This should never happen, just a sanity check + reasons.push( + i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { + number: i + 1, + }) + ); + } + + if ( + !ca.controlImage || + (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none') + ) { + reasons.push( + i18n.t('parameters.invoke.noControlImageForControlAdapter', { + number: i + 1, + }) + ); + } + }); } - - enabledControlAdapters.forEach((ca, i) => { - if (!ca.isEnabled) { - return; - } - - if (!ca.model) { - reasons.push( - i18n.t('parameters.invoke.noModelForControlAdapter', { - number: i + 1, - }) - ); - } else if (ca.model.base !== model?.base) { - // This should never happen, just a sanity check - reasons.push( - i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { - number: i + 1, - }) - ); - } - - if ( - !ca.controlImage || - (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none') - ) { - reasons.push( - i18n.t('parameters.invoke.noControlImageForControlAdapter', { - number: i + 1, - }) - ); - } - }); } return { isReady: !reasons.length, reasons }; diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts 
b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index c316fee2b1..a22f23d9d3 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -8,6 +8,7 @@ import calculateScale from 'features/canvas/util/calculateScale'; import { STAGE_PADDING_PERCENTAGE } from 'features/canvas/util/constants'; import floorCoordinates from 'features/canvas/util/floorCoordinates'; import getScaledBoundingBoxDimensions from 'features/canvas/util/getScaledBoundingBoxDimensions'; +import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; import { initialAspectRatioState } from 'features/parameters/components/ImageSize/constants'; import type { AspectRatioState } from 'features/parameters/components/ImageSize/types'; import { modelChanged } from 'features/parameters/store/generationSlice'; @@ -588,8 +589,9 @@ export const canvasSlice = createSlice({ }, extraReducers: (builder) => { builder.addCase(modelChanged, (state, action) => { - if (action.meta.previousModel?.base === action.payload?.base) { - // The base model hasn't changed, we don't need to optimize the size + const newModel = action.payload; + if (!newModel || action.meta.previousModel?.base === newModel.base) { + // Model was cleared or the base didn't change return; } const optimalDimension = getOptimalDimension(action.payload); @@ -597,14 +599,8 @@ export const canvasSlice = createSlice({ if (getIsSizeOptimal(width, height, optimalDimension)) { return; } - setBoundingBoxDimensionsReducer( - state, - { - width, - height, - }, - optimalDimension - ); + const newSize = calculateNewSize(state.aspectRatio.value, optimalDimension * optimalDimension); + setBoundingBoxDimensionsReducer(state, newSize, optimalDimension); }); builder.addCase(socketQueueItemStatusChanged, (state, action) => { diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts 
b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index 0c1ac20200..f8afde677c 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -1,5 +1,5 @@ import type { PayloadAction, Update } from '@reduxjs/toolkit'; -import { createEntityAdapter, createSlice, isAnyOf } from '@reduxjs/toolkit'; +import { createEntityAdapter, createSlice } from '@reduxjs/toolkit'; import { getSelectorsOptions } from 'app/store/createMemoizedSelector'; import type { PersistConfig, RootState } from 'app/store/store'; import { deepClone } from 'common/util/deepClone'; @@ -481,8 +481,6 @@ export const { t2iAdaptersReset, } = controlAdaptersSlice.actions; -export const isAnyControlAdapterAdded = isAnyOf(controlAdapterAdded, controlAdapterRecalled); - export const selectControlAdaptersSlice = (state: RootState) => state.controlAdapters; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ diff --git a/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx index b521153239..3eb97dddff 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/AddLayerButton.tsx @@ -1,6 +1,7 @@ import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library'; -import { guidanceLayerAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; import { useAppDispatch } from 'app/store/storeHooks'; +import { useAddCALayer, useAddIPALayer } from 'features/controlLayers/hooks/addLayerHooks'; +import { rgLayerAdded } from 'features/controlLayers/store/controlLayersSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiPlusBold } from 
'react-icons/pi'; @@ -8,14 +9,10 @@ import { PiPlusBold } from 'react-icons/pi'; export const AddLayerButton = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); - const addRegionalGuidanceLayer = useCallback(() => { - dispatch(guidanceLayerAdded('regional_guidance_layer')); - }, [dispatch]); - const addControlAdapterLayer = useCallback(() => { - dispatch(guidanceLayerAdded('control_adapter_layer')); - }, [dispatch]); - const addIPAdapterLayer = useCallback(() => { - dispatch(guidanceLayerAdded('ip_adapter_layer')); + const [addCALayer, isAddCALayerDisabled] = useAddCALayer(); + const [addIPALayer, isAddIPALayerDisabled] = useAddIPALayer(); + const addRGLayer = useCallback(() => { + dispatch(rgLayerAdded()); }, [dispatch]); return ( @@ -24,13 +21,13 @@ export const AddLayerButton = memo(() => { {t('controlLayers.addLayer')} - } onClick={addRegionalGuidanceLayer}> + } onClick={addRGLayer}> {t('controlLayers.regionalGuidanceLayer')} - } onClick={addControlAdapterLayer}> + } onClick={addCALayer} isDisabled={isAddCALayerDisabled}> {t('controlLayers.globalControlAdapterLayer')} - } onClick={addIPAdapterLayer}> + } onClick={addIPALayer} isDisabled={isAddIPALayerDisabled}> {t('controlLayers.globalIPAdapterLayer')} diff --git a/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx b/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx index 88eac207b2..26d9c8ce69 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/AddPromptButtons.tsx @@ -1,11 +1,11 @@ import { Button, Flex } from '@invoke-ai/ui-library'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { guidanceLayerIPAdapterAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; 
+import { useAddIPAdapterToIPALayer } from 'features/controlLayers/hooks/addLayerHooks'; import { isRegionalGuidanceLayer, - maskLayerNegativePromptChanged, - maskLayerPositivePromptChanged, + rgLayerNegativePromptChanged, + rgLayerPositivePromptChanged, selectControlLayersSlice, } from 'features/controlLayers/store/controlLayersSlice'; import { useCallback, useMemo } from 'react'; @@ -19,6 +19,7 @@ type AddPromptButtonProps = { export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => { const { t } = useTranslation(); const dispatch = useAppDispatch(); + const [addIPAdapter, isAddIPAdapterDisabled] = useAddIPAdapterToIPALayer(layerId); const selectValidActions = useMemo( () => createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { @@ -33,13 +34,10 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => { ); const validActions = useAppSelector(selectValidActions); const addPositivePrompt = useCallback(() => { - dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' })); + dispatch(rgLayerPositivePromptChanged({ layerId, prompt: '' })); }, [dispatch, layerId]); const addNegativePrompt = useCallback(() => { - dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' })); - }, [dispatch, layerId]); - const addIPAdapter = useCallback(() => { - dispatch(guidanceLayerIPAdapterAdded(layerId)); + dispatch(rgLayerNegativePromptChanged({ layerId, prompt: '' })); }, [dispatch, layerId]); return ( @@ -62,7 +60,13 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => { > {t('common.negativePrompt')} - diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayer.tsx similarity index 54% rename from invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayer.tsx index f97546c4fe..24de817df2 
100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/CALayerListItem.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayer.tsx @@ -1,39 +1,22 @@ import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import CALayerOpacity from 'features/controlLayers/components/CALayerOpacity'; -import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; -import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton'; -import { LayerMenu } from 'features/controlLayers/components/LayerMenu'; -import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; -import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; -import { - isControlAdapterLayer, - layerSelected, - selectControlLayersSlice, -} from 'features/controlLayers/store/controlLayersSlice'; -import { memo, useCallback, useMemo } from 'react'; -import { assert } from 'tsafe'; +import { CALayerControlAdapterWrapper } from 'features/controlLayers/components/CALayer/CALayerControlAdapterWrapper'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton'; +import { LayerMenu } from 'features/controlLayers/components/LayerCommon/LayerMenu'; +import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle'; +import { layerSelected, selectCALayerOrThrow } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useCallback } from 'react'; + +import CALayerOpacity from './CALayerOpacity'; type Props = { layerId: string; }; -export const CALayerListItem = memo(({ layerId }: Props) => { +export const 
CALayer = memo(({ layerId }: Props) => { const dispatch = useAppDispatch(); - const selector = useMemo( - () => - createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { - const layer = controlLayers.present.layers.find((l) => l.id === layerId); - assert(isControlAdapterLayer(layer), `Layer ${layerId} not found or not a ControlNet layer`); - return { - controlNetId: layer.controlNetId, - isSelected: layerId === controlLayers.present.selectedLayerId, - }; - }), - [layerId] - ); - const { controlNetId, isSelected } = useAppSelector(selector); + const isSelected = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).isSelected); const onClickCapture = useCallback(() => { // Must be capture so that the layer is selected before deleting/resetting/etc dispatch(layerSelected(layerId)); @@ -60,7 +43,7 @@ export const CALayerListItem = memo(({ layerId }: Props) => { {isOpen && ( - + )} @@ -68,4 +51,4 @@ export const CALayerListItem = memo(({ layerId }: Props) => { ); }); -CALayerListItem.displayName = 'CALayerListItem'; +CALayer.displayName = 'CALayer'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerControlAdapterWrapper.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerControlAdapterWrapper.tsx new file mode 100644 index 0000000000..6793a33f69 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerControlAdapterWrapper.tsx @@ -0,0 +1,121 @@ +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { ControlAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapter'; +import { + caLayerControlModeChanged, + caLayerImageChanged, + caLayerModelChanged, + caLayerProcessorConfigChanged, + caOrIPALayerBeginEndStepPctChanged, + caOrIPALayerWeightChanged, + selectCALayerOrThrow, +} from 'features/controlLayers/store/controlLayersSlice'; +import type { ControlMode, ProcessorConfig } from 
'features/controlLayers/util/controlAdapters'; +import type { CALayerImageDropData } from 'features/dnd/types'; +import { memo, useCallback, useMemo } from 'react'; +import type { + CALayerImagePostUploadAction, + ControlNetModelConfig, + ImageDTO, + T2IAdapterModelConfig, +} from 'services/api/types'; + +type Props = { + layerId: string; +}; + +export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const controlAdapter = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).controlAdapter); + + const onChangeBeginEndStepPct = useCallback( + (beginEndStepPct: [number, number]) => { + dispatch( + caOrIPALayerBeginEndStepPctChanged({ + layerId, + beginEndStepPct, + }) + ); + }, + [dispatch, layerId] + ); + + const onChangeControlMode = useCallback( + (controlMode: ControlMode) => { + dispatch( + caLayerControlModeChanged({ + layerId, + controlMode, + }) + ); + }, + [dispatch, layerId] + ); + + const onChangeWeight = useCallback( + (weight: number) => { + dispatch(caOrIPALayerWeightChanged({ layerId, weight })); + }, + [dispatch, layerId] + ); + + const onChangeProcessorConfig = useCallback( + (processorConfig: ProcessorConfig | null) => { + dispatch(caLayerProcessorConfigChanged({ layerId, processorConfig })); + }, + [dispatch, layerId] + ); + + const onChangeModel = useCallback( + (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => { + dispatch( + caLayerModelChanged({ + layerId, + modelConfig, + }) + ); + }, + [dispatch, layerId] + ); + + const onChangeImage = useCallback( + (imageDTO: ImageDTO | null) => { + dispatch(caLayerImageChanged({ layerId, imageDTO })); + }, + [dispatch, layerId] + ); + + const droppableData = useMemo( + () => ({ + actionType: 'SET_CA_LAYER_IMAGE', + context: { + layerId, + }, + id: layerId, + }), + [layerId] + ); + + const postUploadAction = useMemo( + () => ({ + layerId, + type: 'SET_CA_LAYER_IMAGE', + }), + [layerId] + ); + + return ( + + ); 
+}); + +CALayerControlAdapterWrapper.displayName = 'CALayerControlAdapterWrapper'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerOpacity.tsx similarity index 90% rename from invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerOpacity.tsx index a6107da1ec..31c8d81853 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/CALayerOpacity.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/CALayer/CALayerOpacity.tsx @@ -15,7 +15,7 @@ import { import { useAppDispatch } from 'app/store/storeHooks'; import { stopPropagation } from 'common/util/stopPropagation'; import { useLayerOpacity } from 'features/controlLayers/hooks/layerStateHooks'; -import { isFilterEnabledChanged, layerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice'; +import { caLayerIsFilterEnabledChanged, caLayerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice'; import type { ChangeEvent } from 'react'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -34,13 +34,13 @@ const CALayerOpacity = ({ layerId }: Props) => { const { opacity, isFilterEnabled } = useLayerOpacity(layerId); const onChangeOpacity = useCallback( (v: number) => { - dispatch(layerOpacityChanged({ layerId, opacity: v / 100 })); + dispatch(caLayerOpacityChanged({ layerId, opacity: v / 100 })); }, [dispatch, layerId] ); const onChangeFilter = useCallback( (e: ChangeEvent) => { - dispatch(isFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked })); + dispatch(caLayerIsFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked })); }, [dispatch, layerId] ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapter.tsx 
b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapter.tsx new file mode 100644 index 0000000000..087f634d73 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapter.tsx @@ -0,0 +1,117 @@ +import { Box, Divider, Flex, Icon, IconButton } from '@invoke-ai/ui-library'; +import { ControlAdapterModelCombobox } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox'; +import type { + ControlMode, + ControlNetConfig, + ProcessorConfig, + T2IAdapterConfig, +} from 'features/controlLayers/util/controlAdapters'; +import type { TypesafeDroppableData } from 'features/dnd/types'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiCaretUpBold } from 'react-icons/pi'; +import { useToggle } from 'react-use'; +import type { ControlNetModelConfig, ImageDTO, PostUploadAction, T2IAdapterModelConfig } from 'services/api/types'; + +import { ControlAdapterBeginEndStepPct } from './ControlAdapterBeginEndStepPct'; +import { ControlAdapterControlModeSelect } from './ControlAdapterControlModeSelect'; +import { ControlAdapterImagePreview } from './ControlAdapterImagePreview'; +import { ControlAdapterProcessorConfig } from './ControlAdapterProcessorConfig'; +import { ControlAdapterProcessorTypeSelect } from './ControlAdapterProcessorTypeSelect'; +import { ControlAdapterWeight } from './ControlAdapterWeight'; + +type Props = { + controlAdapter: ControlNetConfig | T2IAdapterConfig; + onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void; + onChangeControlMode: (controlMode: ControlMode) => void; + onChangeWeight: (weight: number) => void; + onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void; + onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void; + onChangeImage: (imageDTO: ImageDTO | null) => void; + droppableData: TypesafeDroppableData; + 
postUploadAction: PostUploadAction; +}; + +export const ControlAdapter = memo( + ({ + controlAdapter, + onChangeBeginEndStepPct, + onChangeControlMode, + onChangeWeight, + onChangeProcessorConfig, + onChangeModel, + onChangeImage, + droppableData, + postUploadAction, + }: Props) => { + const { t } = useTranslation(); + const [isExpanded, toggleIsExpanded] = useToggle(false); + + return ( + + + + + + + + } + /> + + + + {controlAdapter.type === 'controlnet' && ( + + )} + + + + + + + + {isExpanded && ( + <> + + + + + + + )} + + ); + } +); + +ControlAdapter.displayName = 'ControlAdapter'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct.tsx new file mode 100644 index 0000000000..9da9ce50a0 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct.tsx @@ -0,0 +1,43 @@ +import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +type Props = { + beginEndStepPct: [number, number]; + onChange: (beginEndStepPct: [number, number]) => void; +}; + +const formatPct = (v: number) => `${Math.round(v * 100)}%`; +const ariaLabel = ['Begin Step %', 'End Step %']; + +export const ControlAdapterBeginEndStepPct = memo(({ beginEndStepPct, onChange }: Props) => { + const { t } = useTranslation(); + const onReset = useCallback(() => { + onChange([0, 1]); + }, [onChange]); + + return ( + + + {t('controlnet.beginEndStepPercentShort')} + + + + ); +}); + +ControlAdapterBeginEndStepPct.displayName = 'ControlAdapterBeginEndStepPct'; diff --git 
a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterControlMode.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterControlModeSelect.tsx similarity index 52% rename from invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterControlMode.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterControlModeSelect.tsx index 6b5d34c106..34f4c85467 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterControlMode.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterControlModeSelect.tsx @@ -1,24 +1,19 @@ import type { ComboboxOnChange } from '@invoke-ai/ui-library'; import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; -import { useControlAdapterControlMode } from 'features/controlAdapters/hooks/useControlAdapterControlMode'; -import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; -import { controlAdapterControlModeChanged } from 'features/controlAdapters/store/controlAdaptersSlice'; -import type { ControlMode } from 'features/controlAdapters/store/types'; +import type { ControlMode } from 'features/controlLayers/util/controlAdapters'; +import { isControlMode } from 'features/controlLayers/util/controlAdapters'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +import { assert } from 'tsafe'; type Props = { - id: string; + controlMode: ControlMode; + onChange: (controlMode: ControlMode) => void; }; -const ParamControlAdapterControlMode = ({ id }: Props) => { - const isEnabled = 
useControlAdapterIsEnabled(id); - const controlMode = useControlAdapterControlMode(id); - const dispatch = useAppDispatch(); +export const ControlAdapterControlModeSelect = memo(({ controlMode, onChange }: Props) => { const { t } = useTranslation(); - const CONTROL_MODE_DATA = useMemo( () => [ { label: t('controlnet.balanced'), value: 'balanced' }, @@ -31,17 +26,10 @@ const ParamControlAdapterControlMode = ({ id }: Props) => { const handleControlModeChange = useCallback( (v) => { - if (!v) { - return; - } - dispatch( - controlAdapterControlModeChanged({ - id, - controlMode: v.value as ControlMode, - }) - ); + assert(isControlMode(v?.value)); + onChange(v.value); }, - [id, dispatch] + [onChange] ); const value = useMemo( @@ -54,13 +42,19 @@ const ParamControlAdapterControlMode = ({ id }: Props) => { } return ( - + {t('controlnet.control')} - + ); -}; +}); -export default memo(ParamControlAdapterControlMode); +ControlAdapterControlModeSelect.displayName = 'ControlAdapterControlModeSelect'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterImagePreview.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterImagePreview.tsx new file mode 100644 index 0000000000..7def6b2b56 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterImagePreview.tsx @@ -0,0 +1,215 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Box, Flex, Spinner, useShiftModifier } from '@invoke-ai/ui-library'; +import { skipToken } from '@reduxjs/toolkit/query'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import IAIDndImage from 'common/components/IAIDndImage'; +import IAIDndImageIcon from 'common/components/IAIDndImageIcon'; +import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice'; +import { heightChanged, widthChanged } from 
'features/controlLayers/store/controlLayersSlice'; +import type { ControlNetConfig, T2IAdapterConfig } from 'features/controlLayers/util/controlAdapters'; +import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types'; +import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; +import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; +import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; +import { memo, useCallback, useEffect, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowCounterClockwiseBold, PiFloppyDiskBold, PiRulerBold } from 'react-icons/pi'; +import { + useAddImageToBoardMutation, + useChangeImageIsIntermediateMutation, + useGetImageDTOQuery, + useRemoveImageFromBoardMutation, +} from 'services/api/endpoints/images'; +import type { ImageDTO, PostUploadAction } from 'services/api/types'; + +type Props = { + controlAdapter: ControlNetConfig | T2IAdapterConfig; + onChangeImage: (imageDTO: ImageDTO | null) => void; + droppableData: TypesafeDroppableData; + postUploadAction: PostUploadAction; +}; + +export const ControlAdapterImagePreview = memo( + ({ controlAdapter, onChangeImage, droppableData, postUploadAction }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId); + const isConnected = useAppSelector((s) => s.system.isConnected); + const activeTabName = useAppSelector(activeTabNameSelector); + const optimalDimension = useAppSelector(selectOptimalDimension); + const shift = useShiftModifier(); + + const [isMouseOverImage, setIsMouseOverImage] = useState(false); + + const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery( + controlAdapter.image?.imageName ?? 
skipToken + ); + const { currentData: processedControlImage, isError: isErrorProcessedControlImage } = useGetImageDTOQuery( + controlAdapter.processedImage?.imageName ?? skipToken + ); + + const [changeIsIntermediate] = useChangeImageIsIntermediateMutation(); + const [addToBoard] = useAddImageToBoardMutation(); + const [removeFromBoard] = useRemoveImageFromBoardMutation(); + const handleResetControlImage = useCallback(() => { + onChangeImage(null); + }, [onChangeImage]); + + const handleSaveControlImage = useCallback(async () => { + if (!processedControlImage) { + return; + } + + await changeIsIntermediate({ + imageDTO: processedControlImage, + is_intermediate: false, + }).unwrap(); + + if (autoAddBoardId !== 'none') { + addToBoard({ + imageDTO: processedControlImage, + board_id: autoAddBoardId, + }); + } else { + removeFromBoard({ imageDTO: processedControlImage }); + } + }, [processedControlImage, changeIsIntermediate, autoAddBoardId, addToBoard, removeFromBoard]); + + const handleSetControlImageToDimensions = useCallback(() => { + if (!controlImage) { + return; + } + + if (activeTabName === 'unifiedCanvas') { + dispatch( + setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension) + ); + } else { + if (shift) { + const { width, height } = controlImage; + dispatch(widthChanged({ width, updateAspectRatio: true })); + dispatch(heightChanged({ height, updateAspectRatio: true })); + } else { + const { width, height } = calculateNewSize( + controlImage.width / controlImage.height, + optimalDimension * optimalDimension + ); + dispatch(widthChanged({ width, updateAspectRatio: true })); + dispatch(heightChanged({ height, updateAspectRatio: true })); + } + } + }, [controlImage, activeTabName, dispatch, optimalDimension, shift]); + + const handleMouseEnter = useCallback(() => { + setIsMouseOverImage(true); + }, []); + + const handleMouseLeave = useCallback(() => { + setIsMouseOverImage(false); + }, []); + + const draggableData = 
useMemo(() => { + if (controlImage) { + return { + id: controlAdapter.id, + payloadType: 'IMAGE_DTO', + payload: { imageDTO: controlImage }, + }; + } + }, [controlImage, controlAdapter.id]); + + const shouldShowProcessedImage = + controlImage && + processedControlImage && + !isMouseOverImage && + !controlAdapter.isProcessingImage && + controlAdapter.processorConfig !== null; + + useEffect(() => { + if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) { + handleResetControlImage(); + } + }, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]); + + return ( + + + + + + + + <> + : undefined} + tooltip={t('controlnet.resetControlImage')} + /> + : undefined} + tooltip={t('controlnet.saveControlImage')} + styleOverrides={saveControlImageStyleOverrides} + /> + : undefined} + tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')} + styleOverrides={setControlImageDimensionsStyleOverrides} + /> + + + {controlAdapter.isProcessingImage && ( + + + + )} + + ); + } +); + +ControlAdapterImagePreview.displayName = 'ControlAdapterImagePreview'; + +const saveControlImageStyleOverrides: SystemStyleObject = { mt: 6 }; +const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 12 }; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox.tsx new file mode 100644 index 0000000000..a4b1d6b744 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox.tsx @@ -0,0 +1,62 @@ +import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; +import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; +import { memo, useCallback, useMemo } from 
'react'; +import { useTranslation } from 'react-i18next'; +import { useControlNetAndT2IAdapterModels } from 'services/api/hooks/modelsByType'; +import type { AnyModelConfig, ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types'; + +type Props = { + modelKey: string | null; + onChange: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void; +}; + +export const ControlAdapterModelCombobox = memo(({ modelKey, onChange: onChangeModel }: Props) => { + const { t } = useTranslation(); + const currentBaseModel = useAppSelector((s) => s.generation.model?.base); + const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels(); + const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]); + + const _onChange = useCallback( + (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | null) => { + if (!modelConfig) { + return; + } + onChangeModel(modelConfig); + }, + [onChangeModel] + ); + + const getIsDisabled = useCallback( + (model: AnyModelConfig): boolean => { + const isCompatible = currentBaseModel === model.base; + const hasMainModel = Boolean(currentBaseModel); + return !hasMainModel || !isCompatible; + }, + [currentBaseModel] + ); + + const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({ + modelConfigs, + onChange: _onChange, + selectedModel, + getIsDisabled, + isLoading, + }); + + return ( + + + + + + ); +}); + +ControlAdapterModelCombobox.displayName = 'ControlAdapterModelCombobox'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorConfig.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorConfig.tsx new file mode 100644 index 0000000000..034dc5454e --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorConfig.tsx @@ -0,0 +1,85 @@ +import type { 
ProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { memo } from 'react'; + +import { CannyProcessor } from './processors/CannyProcessor'; +import { ColorMapProcessor } from './processors/ColorMapProcessor'; +import { ContentShuffleProcessor } from './processors/ContentShuffleProcessor'; +import { DepthAnythingProcessor } from './processors/DepthAnythingProcessor'; +import { DWOpenposeProcessor } from './processors/DWOpenposeProcessor'; +import { HedProcessor } from './processors/HedProcessor'; +import { LineartProcessor } from './processors/LineartProcessor'; +import { MediapipeFaceProcessor } from './processors/MediapipeFaceProcessor'; +import { MidasDepthProcessor } from './processors/MidasDepthProcessor'; +import { MlsdImageProcessor } from './processors/MlsdImageProcessor'; +import { PidiProcessor } from './processors/PidiProcessor'; + +type Props = { + config: ProcessorConfig | null; + onChange: (config: ProcessorConfig | null) => void; +}; + +export const ControlAdapterProcessorConfig = memo(({ config, onChange }: Props) => { + if (!config) { + return null; + } + + if (config.type === 'canny_image_processor') { + return ; + } + + if (config.type === 'color_map_image_processor') { + return ; + } + + if (config.type === 'depth_anything_image_processor') { + return ; + } + + if (config.type === 'hed_image_processor') { + return ; + } + + if (config.type === 'lineart_image_processor') { + return ; + } + + if (config.type === 'content_shuffle_image_processor') { + return ; + } + + if (config.type === 'lineart_anime_image_processor') { + // No configurable options for this processor + return null; + } + + if (config.type === 'mediapipe_face_processor') { + return ; + } + + if (config.type === 'midas_depth_image_processor') { + return ; + } + + if (config.type === 'mlsd_image_processor') { + return ; + } + + if (config.type === 'normalbae_image_processor') { + // No configurable options for this processor + return null; + } + + if 
(config.type === 'dw_openpose_image_processor') { + return ; + } + + if (config.type === 'pidi_image_processor') { + return ; + } + + if (config.type === 'zoe_depth_image_processor') { + return null; + } +}); + +ControlAdapterProcessorConfig.displayName = 'ControlAdapterProcessorConfig'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorTypeSelect.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorTypeSelect.tsx new file mode 100644 index 0000000000..46e6131353 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterProcessorTypeSelect.tsx @@ -0,0 +1,70 @@ +import type { ComboboxOnChange } from '@invoke-ai/ui-library'; +import { Combobox, Flex, FormControl, FormLabel, IconButton } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; +import { useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; +import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS, isProcessorType } from 'features/controlLayers/util/controlAdapters'; +import { configSelector } from 'features/system/store/configSelectors'; +import { includes, map } from 'lodash-es'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiXBold } from 'react-icons/pi'; +import { assert } from 'tsafe'; + +type Props = { + config: ProcessorConfig | null; + onChange: (config: ProcessorConfig | null) => void; +}; + +const selectDisabledProcessors = createMemoizedSelector( + configSelector, + (config) => config.sd.disabledControlNetProcessors +); + +export const ControlAdapterProcessorTypeSelect = memo(({ config, onChange }: Props) => { + const { t } = useTranslation(); + const 
disabledProcessors = useAppSelector(selectDisabledProcessors); + const options = useMemo(() => { + return map(CONTROLNET_PROCESSORS, ({ labelTKey }, type) => ({ value: type, label: t(labelTKey) })).filter( + (o) => !includes(disabledProcessors, o.value) + ); + }, [disabledProcessors, t]); + + const _onChange = useCallback( + (v) => { + if (!v) { + onChange(null); + } else { + assert(isProcessorType(v.value)); + onChange(CONTROLNET_PROCESSORS[v.value].buildDefaults()); + } + }, + [onChange] + ); + const clearProcessor = useCallback(() => { + onChange(null); + }, [onChange]); + const value = useMemo(() => options.find((o) => o.value === config?.type) ?? null, [options, config?.type]); + + return ( + + + + {t('controlnet.processor')} + + + + } + variant="ghost" + size="sm" + /> + + ); +}); + +ControlAdapterProcessorTypeSelect.displayName = 'ControlAdapterProcessorTypeSelect'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterWeight.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight.tsx similarity index 60% rename from invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterWeight.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight.tsx index 5e456fc792..4bb7bb3911 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterWeight.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight.tsx @@ -1,24 +1,19 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { useAppSelector } from 'app/store/storeHooks'; import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; 
-import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; -import { useControlAdapterWeight } from 'features/controlAdapters/hooks/useControlAdapterWeight'; -import { controlAdapterWeightChanged } from 'features/controlAdapters/store/controlAdaptersSlice'; -import { isNil } from 'lodash-es'; -import { memo, useCallback } from 'react'; +import { memo } from 'react'; import { useTranslation } from 'react-i18next'; -type ParamControlAdapterWeightProps = { - id: string; +type Props = { + weight: number; + onChange: (weight: number) => void; }; const formatValue = (v: number) => v.toFixed(2); +const marks = [0, 1, 2]; -const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => { +export const ControlAdapterWeight = memo(({ weight, onChange }: Props) => { const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const isEnabled = useControlAdapterIsEnabled(id); - const weight = useControlAdapterWeight(id); const initial = useAppSelector((s) => s.config.sd.ca.weight.initial); const sliderMin = useAppSelector((s) => s.config.sd.ca.weight.sliderMin); const sliderMax = useAppSelector((s) => s.config.sd.ca.weight.sliderMax); @@ -27,20 +22,8 @@ const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => { const coarseStep = useAppSelector((s) => s.config.sd.ca.weight.coarseStep); const fineStep = useAppSelector((s) => s.config.sd.ca.weight.fineStep); - const onChange = useCallback( - (weight: number) => { - dispatch(controlAdapterWeightChanged({ id, weight })); - }, - [dispatch, id] - ); - - if (isNil(weight)) { - // should never happen - return null; - } - return ( - + {t('controlnet.weight')} @@ -67,8 +50,6 @@ const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => { /> ); -}; +}); -export default memo(ParamControlAdapterWeight); - -const marks = [0, 1, 2]; +ControlAdapterWeight.displayName = 'ControlAdapterWeight'; diff --git 
a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapter.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapter.tsx new file mode 100644 index 0000000000..a0aa7d79a1 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapter.tsx @@ -0,0 +1,72 @@ +import { Box, Flex } from '@invoke-ai/ui-library'; +import { ControlAdapterBeginEndStepPct } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct'; +import { ControlAdapterWeight } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight'; +import { IPAdapterImagePreview } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview'; +import { IPAdapterMethod } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod'; +import { IPAdapterModelSelect } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect'; +import type { CLIPVisionModel, IPAdapterConfig, IPMethod } from 'features/controlLayers/util/controlAdapters'; +import type { TypesafeDroppableData } from 'features/dnd/types'; +import { memo } from 'react'; +import type { ImageDTO, IPAdapterModelConfig, PostUploadAction } from 'services/api/types'; + +type Props = { + ipAdapter: IPAdapterConfig; + onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void; + onChangeWeight: (weight: number) => void; + onChangeIPMethod: (method: IPMethod) => void; + onChangeModel: (modelConfig: IPAdapterModelConfig) => void; + onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModel) => void; + onChangeImage: (imageDTO: ImageDTO | null) => void; + droppableData: TypesafeDroppableData; + postUploadAction: PostUploadAction; +}; + +export const IPAdapter = memo( + ({ + ipAdapter, + onChangeBeginEndStepPct, + onChangeWeight, + onChangeIPMethod, + onChangeModel, + onChangeCLIPVisionModel, + onChangeImage, + droppableData, + 
postUploadAction, + }: Props) => { + return ( + + + + + + + + + + + + + + + + + + ); + } +); + +IPAdapter.displayName = 'IPAdapter'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview.tsx new file mode 100644 index 0000000000..7de726cda5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview.tsx @@ -0,0 +1,114 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Flex, useShiftModifier } from '@invoke-ai/ui-library'; +import { skipToken } from '@reduxjs/toolkit/query'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import IAIDndImage from 'common/components/IAIDndImage'; +import IAIDndImageIcon from 'common/components/IAIDndImageIcon'; +import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice'; +import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice'; +import type { ImageWithDims } from 'features/controlLayers/util/controlAdapters'; +import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types'; +import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; +import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; +import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; +import { memo, useCallback, useEffect, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi'; +import { useGetImageDTOQuery } from 'services/api/endpoints/images'; +import type { ImageDTO, PostUploadAction } from 'services/api/types'; + +type Props = { + image: ImageWithDims | null; + onChangeImage: (imageDTO: ImageDTO | null) => void; + ipAdapterId: string; // required 
for the dnd/upload interactions + droppableData: TypesafeDroppableData; + postUploadAction: PostUploadAction; +}; + +export const IPAdapterImagePreview = memo( + ({ image, onChangeImage, ipAdapterId, droppableData, postUploadAction }: Props) => { + const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const isConnected = useAppSelector((s) => s.system.isConnected); + const activeTabName = useAppSelector(activeTabNameSelector); + const optimalDimension = useAppSelector(selectOptimalDimension); + const shift = useShiftModifier(); + + const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery( + image?.imageName ?? skipToken + ); + const handleResetControlImage = useCallback(() => { + onChangeImage(null); + }, [onChangeImage]); + + const handleSetControlImageToDimensions = useCallback(() => { + if (!controlImage) { + return; + } + + if (activeTabName === 'unifiedCanvas') { + dispatch( + setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension) + ); + } else { + if (shift) { + const { width, height } = controlImage; + dispatch(widthChanged({ width, updateAspectRatio: true })); + dispatch(heightChanged({ height, updateAspectRatio: true })); + } else { + const { width, height } = calculateNewSize( + controlImage.width / controlImage.height, + optimalDimension * optimalDimension + ); + dispatch(widthChanged({ width, updateAspectRatio: true })); + dispatch(heightChanged({ height, updateAspectRatio: true })); + } + } + }, [controlImage, activeTabName, dispatch, optimalDimension, shift]); + + const draggableData = useMemo(() => { + if (controlImage) { + return { + id: ipAdapterId, + payloadType: 'IMAGE_DTO', + payload: { imageDTO: controlImage }, + }; + } + }, [controlImage, ipAdapterId]); + + useEffect(() => { + if (isConnected && isErrorControlImage) { + handleResetControlImage(); + } + }, [handleResetControlImage, isConnected, isErrorControlImage]); + + return ( + + + + <> + : 
undefined} + tooltip={t('controlnet.resetControlImage')} + /> + : undefined} + tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')} + styleOverrides={setControlImageDimensionsStyleOverrides} + /> + + + ); + } +); + +IPAdapterImagePreview.displayName = 'IPAdapterImagePreview'; + +const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 6 }; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod.tsx new file mode 100644 index 0000000000..70fd63f9c0 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod.tsx @@ -0,0 +1,44 @@ +import type { ComboboxOnChange } from '@invoke-ai/ui-library'; +import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; +import type { IPMethod } from 'features/controlLayers/util/controlAdapters'; +import { isIPMethod } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { assert } from 'tsafe'; + +type Props = { + method: IPMethod; + onChange: (method: IPMethod) => void; +}; + +export const IPAdapterMethod = memo(({ method, onChange }: Props) => { + const { t } = useTranslation(); + const options: { label: string; value: IPMethod }[] = useMemo( + () => [ + { label: t('controlnet.full'), value: 'full' }, + { label: `${t('controlnet.style')} (${t('common.beta')})`, value: 'style' }, + { label: `${t('controlnet.composition')} (${t('common.beta')})`, value: 'composition' }, + ], + [t] + ); + const _onChange = useCallback( + (v) => { + assert(isIPMethod(v?.value)); + onChange(v.value); + }, + [onChange] + ); + const value = useMemo(() => 
options.find((o) => o.value === method), [options, method]); + + return ( + + + {t('controlnet.ipAdapterMethod')} + + + + ); +}); + +IPAdapterMethod.displayName = 'IPAdapterMethod'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect.tsx new file mode 100644 index 0000000000..e47bcd5182 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect.tsx @@ -0,0 +1,100 @@ +import type { ComboboxOnChange } from '@invoke-ai/ui-library'; +import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library'; +import { useAppSelector } from 'app/store/storeHooks'; +import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; +import type { CLIPVisionModel } from 'features/controlLayers/util/controlAdapters'; +import { isCLIPVisionModel } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useIPAdapterModels } from 'services/api/hooks/modelsByType'; +import type { AnyModelConfig, IPAdapterModelConfig } from 'services/api/types'; +import { assert } from 'tsafe'; + +const CLIP_VISION_OPTIONS = [ + { label: 'ViT-H', value: 'ViT-H' }, + { label: 'ViT-G', value: 'ViT-G' }, +]; + +type Props = { + modelKey: string | null; + onChangeModel: (modelConfig: IPAdapterModelConfig) => void; + clipVisionModel: CLIPVisionModel; + onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModel) => void; +}; + +export const IPAdapterModelSelect = memo( + ({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => { + const { t } = useTranslation(); + const currentBaseModel = useAppSelector((s) => s.generation.model?.base); + const [modelConfigs, { isLoading }] = useIPAdapterModels(); + const selectedModel = 
useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]); + + const _onChangeModel = useCallback( + (modelConfig: IPAdapterModelConfig | null) => { + if (!modelConfig) { + return; + } + onChangeModel(modelConfig); + }, + [onChangeModel] + ); + + const _onChangeCLIPVisionModel = useCallback( + (v) => { + assert(isCLIPVisionModel(v?.value)); + onChangeCLIPVisionModel(v.value); + }, + [onChangeCLIPVisionModel] + ); + + const getIsDisabled = useCallback( + (model: AnyModelConfig): boolean => { + const isCompatible = currentBaseModel === model.base; + const hasMainModel = Boolean(currentBaseModel); + return !hasMainModel || !isCompatible; + }, + [currentBaseModel] + ); + + const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({ + modelConfigs, + onChange: _onChangeModel, + selectedModel, + getIsDisabled, + isLoading, + }); + + const clipVisionModelValue = useMemo( + () => CLIP_VISION_OPTIONS.find((o) => o.value === clipVisionModel), + [clipVisionModel] + ); + + return ( + + + + + + + {selectedModel?.format === 'checkpoint' && ( + + + + )} + + ); + } +); + +IPAdapterModelSelect.displayName = 'IPAdapterModelSelect'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/CannyProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/CannyProcessor.tsx new file mode 100644 index 0000000000..999c8c7764 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/CannyProcessor.tsx @@ -0,0 +1,67 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import { type CannyProcessorConfig, CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { useCallback } from 'react'; +import { 
useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['canny_image_processor'].buildDefaults(); + +export const CannyProcessor = ({ onChange, config }: Props) => { + const { t } = useTranslation(); + const handleLowThresholdChanged = useCallback( + (v: number) => { + onChange({ ...config, low_threshold: v }); + }, + [onChange, config] + ); + const handleHighThresholdChanged = useCallback( + (v: number) => { + onChange({ ...config, high_threshold: v }); + }, + [onChange, config] + ); + + return ( + + + {t('controlnet.lowThreshold')} + + + + + {t('controlnet.highThreshold')} + + + + + ); +}; + +CannyProcessor.displayName = 'CannyProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ColorMapProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ColorMapProcessor.tsx new file mode 100644 index 0000000000..bf2d0d5d6d --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ColorMapProcessor.tsx @@ -0,0 +1,47 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import { type ColorMapProcessorConfig, CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['color_map_image_processor'].buildDefaults(); + +export const ColorMapProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + const handleColorMapTileSizeChanged = useCallback( + (v: number) 
=> { + onChange({ ...config, color_map_tile_size: v }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.colorMapTileSize')} + + + + + ); +}); + +ColorMapProcessor.displayName = 'ColorMapProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ContentShuffleProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ContentShuffleProcessor.tsx new file mode 100644 index 0000000000..041ab0ac9a --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ContentShuffleProcessor.tsx @@ -0,0 +1,79 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { ContentShuffleProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['content_shuffle_image_processor'].buildDefaults(); + +export const ContentShuffleProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleWChanged = useCallback( + (v: number) => { + onChange({ ...config, w: v }); + }, + [config, onChange] + ); + + const handleHChanged = useCallback( + (v: number) => { + onChange({ ...config, h: v }); + }, + [config, onChange] + ); + + const handleFChanged = useCallback( + (v: number) => { + onChange({ ...config, f: v }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.w')} + + + + + {t('controlnet.h')} + + + + + {t('controlnet.f')} + + + + + ); +}); + 
+ContentShuffleProcessor.displayName = 'ContentShuffleProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DWOpenposeProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DWOpenposeProcessor.tsx new file mode 100644 index 0000000000..70d608f7a9 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DWOpenposeProcessor.tsx @@ -0,0 +1,62 @@ +import { Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { DWOpenposeProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['dw_openpose_image_processor'].buildDefaults(); + +export const DWOpenposeProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleDrawBodyChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, draw_body: e.target.checked }); + }, + [config, onChange] + ); + + const handleDrawFaceChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, draw_face: e.target.checked }); + }, + [config, onChange] + ); + + const handleDrawHandsChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, draw_hands: e.target.checked }); + }, + [config, onChange] + ); + + return ( + + + + {t('controlnet.body')} + + + + {t('controlnet.face')} + + + + {t('controlnet.hands')} + + + + + ); +}); + +DWOpenposeProcessor.displayName = 'DWOpenposeProcessor'; diff --git 
a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DepthAnythingProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DepthAnythingProcessor.tsx new file mode 100644 index 0000000000..d2e14a17f9 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/DepthAnythingProcessor.tsx @@ -0,0 +1,52 @@ +import type { ComboboxOnChange } from '@invoke-ai/ui-library'; +import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { DepthAnythingModelSize, DepthAnythingProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS, isDepthAnythingModelSize } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['depth_anything_image_processor'].buildDefaults(); + +export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + const handleModelSizeChange = useCallback( + (v) => { + if (!isDepthAnythingModelSize(v?.value)) { + return; + } + onChange({ ...config, model_size: v.value }); + }, + [config, onChange] + ); + + const options: { label: string; value: DepthAnythingModelSize }[] = useMemo( + () => [ + { label: t('controlnet.small'), value: 'small' }, + { label: t('controlnet.base'), value: 'base' }, + { label: t('controlnet.large'), value: 'large' }, + ], + [t] + ); + + const value = useMemo(() => options.filter((o) => o.value === config.model_size)[0], [options, config.model_size]); + + return ( + + + {t('controlnet.modelSize')} + + + + ); +}); + 
+DepthAnythingProcessor.displayName = 'DepthAnythingProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/HedProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/HedProcessor.tsx new file mode 100644 index 0000000000..1ca75eae2f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/HedProcessor.tsx @@ -0,0 +1,32 @@ +import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { HedProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; + +export const HedProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleScribbleChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, scribble: e.target.checked }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.scribble')} + + + + ); +}); + +HedProcessor.displayName = 'HedProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/LineartProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/LineartProcessor.tsx new file mode 100644 index 0000000000..aeb4121a36 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/LineartProcessor.tsx @@ -0,0 +1,32 @@ +import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 
'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { LineartProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; + +export const LineartProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleCoarseChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, coarse: e.target.checked }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.coarse')} + + + + ); +}); + +LineartProcessor.displayName = 'LineartProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MediapipeFaceProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MediapipeFaceProcessor.tsx new file mode 100644 index 0000000000..72f0d52dc5 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MediapipeFaceProcessor.tsx @@ -0,0 +1,73 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import { CONTROLNET_PROCESSORS, type MediapipeFaceProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['mediapipe_face_processor'].buildDefaults(); + +export const MediapipeFaceProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleMaxFacesChanged = 
useCallback( + (v: number) => { + onChange({ ...config, max_faces: v }); + }, + [config, onChange] + ); + + const handleMinConfidenceChanged = useCallback( + (v: number) => { + onChange({ ...config, min_confidence: v }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.maxFaces')} + + + + + {t('controlnet.minConfidence')} + + + + + ); +}); + +MediapipeFaceProcessor.displayName = 'MediapipeFaceProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MidasDepthProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MidasDepthProcessor.tsx new file mode 100644 index 0000000000..9078d14a53 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MidasDepthProcessor.tsx @@ -0,0 +1,76 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { MidasDepthProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['midas_depth_image_processor'].buildDefaults(); + +export const MidasDepthProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleAMultChanged = useCallback( + (v: number) => { + onChange({ ...config, a_mult: v }); + }, + [config, onChange] + ); + + const handleBgThChanged = useCallback( + (v: number) => { + onChange({ ...config, bg_th: v }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.amult')} + + + + + 
{t('controlnet.bgth')} + + + + + ); +}); + +MidasDepthProcessor.displayName = 'MidasDepthProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MlsdImageProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MlsdImageProcessor.tsx new file mode 100644 index 0000000000..5fc0c21ecd --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/MlsdImageProcessor.tsx @@ -0,0 +1,76 @@ +import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { MlsdProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; +const DEFAULTS = CONTROLNET_PROCESSORS['mlsd_image_processor'].buildDefaults(); + +export const MlsdImageProcessor = memo(({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleThrDChanged = useCallback( + (v: number) => { + onChange({ ...config, thr_d: v }); + }, + [config, onChange] + ); + + const handleThrVChanged = useCallback( + (v: number) => { + onChange({ ...config, thr_v: v }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.w')} + + + + + {t('controlnet.h')} + + + + + ); +}); + +MlsdImageProcessor.displayName = 'MlsdImageProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/PidiProcessor.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/PidiProcessor.tsx new file mode 100644 index 
0000000000..e7d559a1b4 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/PidiProcessor.tsx @@ -0,0 +1,43 @@ +import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library'; +import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types'; +import type { PidiProcessorConfig } from 'features/controlLayers/util/controlAdapters'; +import type { ChangeEvent } from 'react'; +import { useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; + +import ProcessorWrapper from './ProcessorWrapper'; + +type Props = ProcessorComponentProps; + +export const PidiProcessor = ({ onChange, config }: Props) => { + const { t } = useTranslation(); + + const handleScribbleChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, scribble: e.target.checked }); + }, + [config, onChange] + ); + + const handleSafeChanged = useCallback( + (e: ChangeEvent) => { + onChange({ ...config, safe: e.target.checked }); + }, + [config, onChange] + ); + + return ( + + + {t('controlnet.scribble')} + + + + {t('controlnet.safe')} + + + + ); +}; + +PidiProcessor.displayName = 'PidiProcessor'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ProcessorWrapper.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ProcessorWrapper.tsx new file mode 100644 index 0000000000..2b2468703b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/ProcessorWrapper.tsx @@ -0,0 +1,15 @@ +import { Flex } from '@invoke-ai/ui-library'; +import type { PropsWithChildren } from 'react'; +import { memo } from 'react'; + +type Props = PropsWithChildren; + +const ProcessorWrapper = (props: Props) => { + return ( + + {props.children} + + ); +}; + +export default memo(ProcessorWrapper); diff --git 
a/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/types.ts b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/types.ts new file mode 100644 index 0000000000..48a0942678 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlAndIPAdapter/processors/types.ts @@ -0,0 +1,6 @@ +import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters'; + +export type ProcessorComponentProps = { + onChange: (config: T) => void; + config: T; +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx index e2865be356..ffa2856116 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayersPanelContent.tsx @@ -4,10 +4,10 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; import { AddLayerButton } from 'features/controlLayers/components/AddLayerButton'; -import { CALayerListItem } from 'features/controlLayers/components/CALayerListItem'; +import { CALayer } from 'features/controlLayers/components/CALayer/CALayer'; import { DeleteAllLayersButton } from 'features/controlLayers/components/DeleteAllLayersButton'; -import { IPLayerListItem } from 'features/controlLayers/components/IPLayerListItem'; -import { RGLayerListItem } from 'features/controlLayers/components/RGLayerListItem'; +import { IPALayer } from 'features/controlLayers/components/IPALayer/IPALayer'; +import { RGLayer } from 'features/controlLayers/components/RGLayer/RGLayer'; import { isRenderableLayer, selectControlLayersSlice } from 
'features/controlLayers/store/controlLayersSlice'; import type { Layer } from 'features/controlLayers/store/types'; import { partition } from 'lodash-es'; @@ -46,13 +46,13 @@ type LayerWrapperProps = { const LayerWrapper = memo(({ id, type }: LayerWrapperProps) => { if (type === 'regional_guidance_layer') { - return ; + return ; } if (type === 'control_adapter_layer') { - return ; + return ; } if (type === 'ip_adapter_layer') { - return ; + return ; } }); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx index c55864afa5..dad102b470 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/DeleteAllLayersButton.tsx @@ -1,6 +1,6 @@ import { Button } from '@invoke-ai/ui-library'; -import { allLayersDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; -import { useAppDispatch } from 'app/store/storeHooks'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { allLayersDeleted } from 'features/controlLayers/store/controlLayersSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiTrashSimpleBold } from 'react-icons/pi'; @@ -8,12 +8,19 @@ import { PiTrashSimpleBold } from 'react-icons/pi'; export const DeleteAllLayersButton = memo(() => { const { t } = useTranslation(); const dispatch = useAppDispatch(); + const isDisabled = useAppSelector((s) => s.controlLayers.present.layers.length === 0); const onClick = useCallback(() => { dispatch(allLayersDeleted()); }, [dispatch]); return ( - ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayer.tsx b/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayer.tsx new file mode 100644 index 
0000000000..715e538679 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayer.tsx @@ -0,0 +1,33 @@ +import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; +import { IPALayerIPAdapterWrapper } from 'features/controlLayers/components/IPALayer/IPALayerIPAdapterWrapper'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton'; +import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle'; +import { memo } from 'react'; + +type Props = { + layerId: string; +}; + +export const IPALayer = memo(({ layerId }: Props) => { + const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); + return ( + + + + + + + + + {isOpen && ( + + + + )} + + + ); +}); + +IPALayer.displayName = 'IPALayer'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayerIPAdapterWrapper.tsx b/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayerIPAdapterWrapper.tsx new file mode 100644 index 0000000000..b8dfae6c03 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/IPALayer/IPALayerIPAdapterWrapper.tsx @@ -0,0 +1,106 @@ +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { IPAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapter'; +import { + caOrIPALayerBeginEndStepPctChanged, + caOrIPALayerWeightChanged, + ipaLayerCLIPVisionModelChanged, + ipaLayerImageChanged, + ipaLayerMethodChanged, + ipaLayerModelChanged, + selectIPALayerOrThrow, +} from 'features/controlLayers/store/controlLayersSlice'; +import type { CLIPVisionModel, IPMethod } from 'features/controlLayers/util/controlAdapters'; +import type { IPALayerImageDropData } from 'features/dnd/types'; +import { memo, useCallback, useMemo } from 'react'; +import type { 
ImageDTO, IPAdapterModelConfig, IPALayerImagePostUploadAction } from 'services/api/types'; + +type Props = { + layerId: string; +}; + +export const IPALayerIPAdapterWrapper = memo(({ layerId }: Props) => { + const dispatch = useAppDispatch(); + const ipAdapter = useAppSelector((s) => selectIPALayerOrThrow(s.controlLayers.present, layerId).ipAdapter); + + const onChangeBeginEndStepPct = useCallback( + (beginEndStepPct: [number, number]) => { + dispatch( + caOrIPALayerBeginEndStepPctChanged({ + layerId, + beginEndStepPct, + }) + ); + }, + [dispatch, layerId] + ); + + const onChangeWeight = useCallback( + (weight: number) => { + dispatch(caOrIPALayerWeightChanged({ layerId, weight })); + }, + [dispatch, layerId] + ); + + const onChangeIPMethod = useCallback( + (method: IPMethod) => { + dispatch(ipaLayerMethodChanged({ layerId, method })); + }, + [dispatch, layerId] + ); + + const onChangeModel = useCallback( + (modelConfig: IPAdapterModelConfig) => { + dispatch(ipaLayerModelChanged({ layerId, modelConfig })); + }, + [dispatch, layerId] + ); + + const onChangeCLIPVisionModel = useCallback( + (clipVisionModel: CLIPVisionModel) => { + dispatch(ipaLayerCLIPVisionModelChanged({ layerId, clipVisionModel })); + }, + [dispatch, layerId] + ); + + const onChangeImage = useCallback( + (imageDTO: ImageDTO | null) => { + dispatch(ipaLayerImageChanged({ layerId, imageDTO })); + }, + [dispatch, layerId] + ); + + const droppableData = useMemo( + () => ({ + actionType: 'SET_IPA_LAYER_IMAGE', + context: { + layerId, + }, + id: layerId, + }), + [layerId] + ); + + const postUploadAction = useMemo( + () => ({ + type: 'SET_IPA_LAYER_IMAGE', + layerId, + }), + [layerId] + ); + + return ( + + ); +}); + +IPALayerIPAdapterWrapper.displayName = 'IPALayerIPAdapterWrapper'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx deleted file mode 100644 index 
bdc54373a0..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/IPLayerListItem.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { useAppSelector } from 'app/store/storeHooks'; -import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; -import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton'; -import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; -import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; -import { isIPAdapterLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; -import { memo, useMemo } from 'react'; -import { assert } from 'tsafe'; - -type Props = { - layerId: string; -}; - -export const IPLayerListItem = memo(({ layerId }: Props) => { - const selector = useMemo( - () => - createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { - const layer = controlLayers.present.layers.find((l) => l.id === layerId); - assert(isIPAdapterLayer(layer), `Layer ${layerId} not found or not an IP Adapter layer`); - return layer.ipAdapterId; - }), - [layerId] - ); - const ipAdapterId = useAppSelector(selector); - const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true }); - return ( - - - - - - - - - {isOpen && ( - - - - )} - - - ); -}); - -IPLayerListItem.displayName = 'IPLayerListItem'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerDeleteButton.tsx similarity index 84% rename from invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerDeleteButton.tsx 
index 0c74b2a9ea..0cd7d83dfe 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/LayerDeleteButton.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerDeleteButton.tsx @@ -1,7 +1,7 @@ import { IconButton } from '@invoke-ai/ui-library'; -import { guidanceLayerDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; import { useAppDispatch } from 'app/store/storeHooks'; import { stopPropagation } from 'common/util/stopPropagation'; +import { layerDeleted } from 'features/controlLayers/store/controlLayersSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; import { PiTrashSimpleBold } from 'react-icons/pi'; @@ -12,7 +12,7 @@ export const LayerDeleteButton = memo(({ layerId }: Props) => { const { t } = useTranslation(); const dispatch = useAppDispatch(); const deleteLayer = useCallback(() => { - dispatch(guidanceLayerDeleted(layerId)); + dispatch(layerDeleted(layerId)); }, [dispatch, layerId]); return ( { const dispatch = useAppDispatch(); const { t } = useTranslation(); + const [addIPAdapter, isAddIPAdapterDisabled] = useAddIPAdapterToIPALayer(layerId); const selectValidActions = useMemo( () => createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { @@ -32,13 +33,10 @@ export const LayerMenuRGActions = memo(({ layerId }: Props) => { ); const validActions = useAppSelector(selectValidActions); const addPositivePrompt = useCallback(() => { - dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' })); + dispatch(rgLayerPositivePromptChanged({ layerId, prompt: '' })); }, [dispatch, layerId]); const addNegativePrompt = useCallback(() => { - dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' })); - }, [dispatch, layerId]); - const addIPAdapter = useCallback(() => { - dispatch(guidanceLayerIPAdapterAdded(layerId)); + dispatch(rgLayerNegativePromptChanged({ layerId, prompt: '' })); }, [dispatch, 
layerId]); return ( <> @@ -48,7 +46,7 @@ export const LayerMenuRGActions = memo(({ layerId }: Props) => { }> {t('controlLayers.addNegativePrompt')} - }> + } isDisabled={isAddIPAdapterDisabled}> {t('controlLayers.addIPAdapter')} diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerTitle.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerTitle.tsx similarity index 100% rename from invokeai/frontend/web/src/features/controlLayers/components/LayerTitle.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerTitle.tsx diff --git a/invokeai/frontend/web/src/features/controlLayers/components/LayerVisibilityToggle.tsx b/invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerVisibilityToggle.tsx similarity index 100% rename from invokeai/frontend/web/src/features/controlLayers/components/LayerVisibilityToggle.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/LayerCommon/LayerVisibilityToggle.tsx diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayer.tsx similarity index 80% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayer.tsx index 3c126cabaa..baed22f6ca 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerListItem.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayer.tsx @@ -2,15 +2,11 @@ import { Badge, Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { rgbColorToString } from 'features/canvas/util/colorToString'; -import { LayerDeleteButton } from 
'features/controlLayers/components/LayerDeleteButton'; -import { LayerMenu } from 'features/controlLayers/components/LayerMenu'; -import { LayerTitle } from 'features/controlLayers/components/LayerTitle'; -import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle'; -import { RGLayerColorPicker } from 'features/controlLayers/components/RGLayerColorPicker'; -import { RGLayerIPAdapterList } from 'features/controlLayers/components/RGLayerIPAdapterList'; -import { RGLayerNegativePrompt } from 'features/controlLayers/components/RGLayerNegativePrompt'; -import { RGLayerPositivePrompt } from 'features/controlLayers/components/RGLayerPositivePrompt'; -import RGLayerSettingsPopover from 'features/controlLayers/components/RGLayerSettingsPopover'; +import { AddPromptButtons } from 'features/controlLayers/components/AddPromptButtons'; +import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton'; +import { LayerMenu } from 'features/controlLayers/components/LayerCommon/LayerMenu'; +import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle'; +import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle'; import { isRegionalGuidanceLayer, layerSelected, @@ -20,13 +16,17 @@ import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { assert } from 'tsafe'; -import { AddPromptButtons } from './AddPromptButtons'; +import { RGLayerColorPicker } from './RGLayerColorPicker'; +import { RGLayerIPAdapterList } from './RGLayerIPAdapterList'; +import { RGLayerNegativePrompt } from './RGLayerNegativePrompt'; +import { RGLayerPositivePrompt } from './RGLayerPositivePrompt'; +import RGLayerSettingsPopover from './RGLayerSettingsPopover'; type Props = { layerId: string; }; -export const RGLayerListItem = memo(({ layerId }: Props) => { +export const RGLayer = memo(({ layerId }: Props) => { const { t } 
= useTranslation(); const dispatch = useAppDispatch(); const selector = useMemo( @@ -38,7 +38,7 @@ export const RGLayerListItem = memo(({ layerId }: Props) => { color: rgbColorToString(layer.previewColor), hasPositivePrompt: layer.positivePrompt !== null, hasNegativePrompt: layer.negativePrompt !== null, - hasIPAdapters: layer.ipAdapterIds.length > 0, + hasIPAdapters: layer.ipAdapters.length > 0, isSelected: layerId === controlLayers.present.selectedLayerId, autoNegative: layer.autoNegative, }; @@ -81,4 +81,4 @@ export const RGLayerListItem = memo(({ layerId }: Props) => { ); }); -RGLayerListItem.displayName = 'RGLayerListItem'; +RGLayer.displayName = 'RGLayer'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerAutoNegativeCheckbox.tsx similarity index 91% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerAutoNegativeCheckbox.tsx index 6f03d4b28d..89edb58d2f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerAutoNegativeCheckbox.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerAutoNegativeCheckbox.tsx @@ -3,7 +3,7 @@ import { createSelector } from '@reduxjs/toolkit'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { isRegionalGuidanceLayer, - maskLayerAutoNegativeChanged, + rgLayerAutoNegativeChanged, selectControlLayersSlice, } from 'features/controlLayers/store/controlLayersSlice'; import type { ChangeEvent } from 'react'; @@ -35,7 +35,7 @@ export const RGLayerAutoNegativeCheckbox = memo(({ layerId }: Props) => { const autoNegative = useAutoNegative(layerId); const onChange = useCallback( (e: ChangeEvent) => { - dispatch(maskLayerAutoNegativeChanged({ layerId, autoNegative: e.target.checked ? 
'invert' : 'off' })); + dispatch(rgLayerAutoNegativeChanged({ layerId, autoNegative: e.target.checked ? 'invert' : 'off' })); }, [dispatch, layerId] ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerColorPicker.tsx similarity index 95% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerColorPicker.tsx index e76ab57a51..624047caf3 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerColorPicker.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerColorPicker.tsx @@ -6,7 +6,7 @@ import { stopPropagation } from 'common/util/stopPropagation'; import { rgbColorToString } from 'features/canvas/util/colorToString'; import { isRegionalGuidanceLayer, - maskLayerPreviewColorChanged, + rgLayerPreviewColorChanged, selectControlLayersSlice, } from 'features/controlLayers/store/controlLayersSlice'; import { memo, useCallback, useMemo } from 'react'; @@ -33,7 +33,7 @@ export const RGLayerColorPicker = memo(({ layerId }: Props) => { const dispatch = useAppDispatch(); const onColorChange = useCallback( (color: RgbColor) => { - dispatch(maskLayerPreviewColorChanged({ layerId, color })); + dispatch(rgLayerPreviewColorChanged({ layerId, color })); }, [dispatch, layerId] ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterList.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterList.tsx new file mode 100644 index 0000000000..578d3789bf --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterList.tsx @@ -0,0 +1,45 @@ +import { Divider, Flex } from '@invoke-ai/ui-library'; +import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; 
+import { useAppSelector } from 'app/store/storeHooks'; +import { RGLayerIPAdapterWrapper } from 'features/controlLayers/components/RGLayer/RGLayerIPAdapterWrapper'; +import { isRegionalGuidanceLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; +import { memo, useMemo } from 'react'; +import { assert } from 'tsafe'; + +type Props = { + layerId: string; +}; + +export const RGLayerIPAdapterList = memo(({ layerId }: Props) => { + const selectIPAdapterIds = useMemo( + () => + createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { + const layer = controlLayers.present.layers.filter(isRegionalGuidanceLayer).find((l) => l.id === layerId); + assert(layer, `Layer ${layerId} not found`); + return layer.ipAdapters; + }), + [layerId] + ); + const ipAdapters = useAppSelector(selectIPAdapterIds); + + if (ipAdapters.length === 0) { + return null; + } + + return ( + <> + {ipAdapters.map(({ id }, index) => ( + + {index > 0 && ( + + + + )} + + + ))} + + ); +}); + +RGLayerIPAdapterList.displayName = 'RGLayerIPAdapterList'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterWrapper.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterWrapper.tsx new file mode 100644 index 0000000000..015cf75e4d --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerIPAdapterWrapper.tsx @@ -0,0 +1,131 @@ +import { Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { IPAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapter'; +import { + rgLayerIPAdapterBeginEndStepPctChanged, + rgLayerIPAdapterCLIPVisionModelChanged, + rgLayerIPAdapterDeleted, + rgLayerIPAdapterImageChanged, + rgLayerIPAdapterMethodChanged, + rgLayerIPAdapterModelChanged, + rgLayerIPAdapterWeightChanged, + selectRGLayerIPAdapterOrThrow, +} from 
'features/controlLayers/store/controlLayersSlice'; +import type { CLIPVisionModel, IPMethod } from 'features/controlLayers/util/controlAdapters'; +import type { RGLayerIPAdapterImageDropData } from 'features/dnd/types'; +import { memo, useCallback, useMemo } from 'react'; +import { PiTrashSimpleBold } from 'react-icons/pi'; +import type { ImageDTO, IPAdapterModelConfig, RGLayerIPAdapterImagePostUploadAction } from 'services/api/types'; + +type Props = { + layerId: string; + ipAdapterId: string; + ipAdapterNumber: number; +}; + +export const RGLayerIPAdapterWrapper = memo(({ layerId, ipAdapterId, ipAdapterNumber }: Props) => { + const dispatch = useAppDispatch(); + const onDeleteIPAdapter = useCallback(() => { + dispatch(rgLayerIPAdapterDeleted({ layerId, ipAdapterId })); + }, [dispatch, ipAdapterId, layerId]); + const ipAdapter = useAppSelector((s) => selectRGLayerIPAdapterOrThrow(s.controlLayers.present, layerId, ipAdapterId)); + + const onChangeBeginEndStepPct = useCallback( + (beginEndStepPct: [number, number]) => { + dispatch( + rgLayerIPAdapterBeginEndStepPctChanged({ + layerId, + ipAdapterId, + beginEndStepPct, + }) + ); + }, + [dispatch, ipAdapterId, layerId] + ); + + const onChangeWeight = useCallback( + (weight: number) => { + dispatch(rgLayerIPAdapterWeightChanged({ layerId, ipAdapterId, weight })); + }, + [dispatch, ipAdapterId, layerId] + ); + + const onChangeIPMethod = useCallback( + (method: IPMethod) => { + dispatch(rgLayerIPAdapterMethodChanged({ layerId, ipAdapterId, method })); + }, + [dispatch, ipAdapterId, layerId] + ); + + const onChangeModel = useCallback( + (modelConfig: IPAdapterModelConfig) => { + dispatch(rgLayerIPAdapterModelChanged({ layerId, ipAdapterId, modelConfig })); + }, + [dispatch, ipAdapterId, layerId] + ); + + const onChangeCLIPVisionModel = useCallback( + (clipVisionModel: CLIPVisionModel) => { + dispatch(rgLayerIPAdapterCLIPVisionModelChanged({ layerId, ipAdapterId, clipVisionModel })); + }, + [dispatch, ipAdapterId, layerId] 
+ ); + + const onChangeImage = useCallback( + (imageDTO: ImageDTO | null) => { + dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO })); + }, + [dispatch, ipAdapterId, layerId] + ); + + const droppableData = useMemo( + () => ({ + actionType: 'SET_RG_LAYER_IP_ADAPTER_IMAGE', + context: { + layerId, + ipAdapterId, + }, + id: layerId, + }), + [ipAdapterId, layerId] + ); + + const postUploadAction = useMemo( + () => ({ + type: 'SET_RG_LAYER_IP_ADAPTER_IMAGE', + layerId, + ipAdapterId, + }), + [ipAdapterId, layerId] + ); + + return ( + + + {`IP Adapter ${ipAdapterNumber}`} + + } + aria-label="Delete IP Adapter" + onClick={onDeleteIPAdapter} + variant="ghost" + colorScheme="error" + /> + + + + ); +}); + +RGLayerIPAdapterWrapper.displayName = 'RGLayerIPAdapterWrapper'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerNegativePrompt.tsx similarity index 90% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerNegativePrompt.tsx index e869c8809a..ba02aa9242 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerNegativePrompt.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerNegativePrompt.tsx @@ -1,8 +1,8 @@ import { Box, Textarea } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; -import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayerPromptDeleteButton'; +import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton'; import { useLayerNegativePrompt } from 'features/controlLayers/hooks/layerStateHooks'; -import { maskLayerNegativePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; +import { 
rgLayerNegativePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper'; import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton'; import { PromptPopover } from 'features/prompt/PromptPopover'; @@ -21,7 +21,7 @@ export const RGLayerNegativePrompt = memo(({ layerId }: Props) => { const { t } = useTranslation(); const _onChange = useCallback( (v: string) => { - dispatch(maskLayerNegativePromptChanged({ layerId, prompt: v })); + dispatch(rgLayerNegativePromptChanged({ layerId, prompt: v })); }, [dispatch, layerId] ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerPositivePrompt.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPositivePrompt.tsx similarity index 90% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerPositivePrompt.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPositivePrompt.tsx index 6d508338c1..6f85ea077c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerPositivePrompt.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPositivePrompt.tsx @@ -1,8 +1,8 @@ import { Box, Textarea } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; -import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayerPromptDeleteButton'; +import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton'; import { useLayerPositivePrompt } from 'features/controlLayers/hooks/layerStateHooks'; -import { maskLayerPositivePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; +import { rgLayerPositivePromptChanged } from 'features/controlLayers/store/controlLayersSlice'; import { PromptOverlayButtonWrapper } from 
'features/parameters/components/Prompts/PromptOverlayButtonWrapper'; import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton'; import { PromptPopover } from 'features/prompt/PromptPopover'; @@ -21,7 +21,7 @@ export const RGLayerPositivePrompt = memo(({ layerId }: Props) => { const { t } = useTranslation(); const _onChange = useCallback( (v: string) => { - dispatch(maskLayerPositivePromptChanged({ layerId, prompt: v })); + dispatch(rgLayerPositivePromptChanged({ layerId, prompt: v })); }, [dispatch, layerId] ); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerPromptDeleteButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton.tsx similarity index 82% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerPromptDeleteButton.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton.tsx index 9a32bb68ad..62a4ddfaeb 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerPromptDeleteButton.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton.tsx @@ -1,8 +1,8 @@ import { IconButton, Tooltip } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; import { - maskLayerNegativePromptChanged, - maskLayerPositivePromptChanged, + rgLayerNegativePromptChanged, + rgLayerPositivePromptChanged, } from 'features/controlLayers/store/controlLayersSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -18,9 +18,9 @@ export const RGLayerPromptDeleteButton = memo(({ layerId, polarity }: Props) => const dispatch = useAppDispatch(); const onClick = useCallback(() => { if (polarity === 'positive') { - dispatch(maskLayerPositivePromptChanged({ layerId, prompt: null })); + dispatch(rgLayerPositivePromptChanged({ layerId, prompt: null })); } else { - 
dispatch(maskLayerNegativePromptChanged({ layerId, prompt: null })); + dispatch(rgLayerNegativePromptChanged({ layerId, prompt: null })); } }, [dispatch, layerId, polarity]); return ( diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerSettingsPopover.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerSettingsPopover.tsx similarity index 96% rename from invokeai/frontend/web/src/features/controlLayers/components/RGLayerSettingsPopover.tsx rename to invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerSettingsPopover.tsx index e270748b9b..9203069b3c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerSettingsPopover.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/RGLayer/RGLayerSettingsPopover.tsx @@ -10,7 +10,7 @@ import { PopoverTrigger, } from '@invoke-ai/ui-library'; import { stopPropagation } from 'common/util/stopPropagation'; -import { RGLayerAutoNegativeCheckbox } from 'features/controlLayers/components/RGLayerAutoNegativeCheckbox'; +import { RGLayerAutoNegativeCheckbox } from 'features/controlLayers/components/RGLayer/RGLayerAutoNegativeCheckbox'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiGearSixBold } from 'react-icons/pi'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx deleted file mode 100644 index 464bd41897..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/RGLayerIPAdapterList.tsx +++ /dev/null @@ -1,80 +0,0 @@ -import { Divider, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { guidanceLayerIPAdapterDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge'; -import { 
useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig'; -import { isRegionalGuidanceLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; -import { memo, useCallback, useMemo } from 'react'; -import { PiTrashSimpleBold } from 'react-icons/pi'; -import { assert } from 'tsafe'; - -type Props = { - layerId: string; -}; - -export const RGLayerIPAdapterList = memo(({ layerId }: Props) => { - const selectIPAdapterIds = useMemo( - () => - createMemoizedSelector(selectControlLayersSlice, (controlLayers) => { - const layer = controlLayers.present.layers.filter(isRegionalGuidanceLayer).find((l) => l.id === layerId); - assert(layer, `Layer ${layerId} not found`); - return layer.ipAdapterIds; - }), - [layerId] - ); - const ipAdapterIds = useAppSelector(selectIPAdapterIds); - - if (ipAdapterIds.length === 0) { - return null; - } - - return ( - <> - {ipAdapterIds.map((id, index) => ( - - {index > 0 && ( - - - - )} - - - ))} - - ); -}); - -RGLayerIPAdapterList.displayName = 'RGLayerIPAdapterList'; - -type IPAdapterListItemProps = { - layerId: string; - ipAdapterId: string; - ipAdapterNumber: number; -}; - -const RGLayerIPAdapterListItem = memo(({ layerId, ipAdapterId, ipAdapterNumber }: IPAdapterListItemProps) => { - const dispatch = useAppDispatch(); - const onDeleteIPAdapter = useCallback(() => { - dispatch(guidanceLayerIPAdapterDeleted({ layerId, ipAdapterId })); - }, [dispatch, ipAdapterId, layerId]); - - return ( - - - {`IP Adapter ${ipAdapterNumber}`} - - } - aria-label="Delete IP Adapter" - onClick={onDeleteIPAdapter} - variant="ghost" - colorScheme="error" - /> - - - - ); -}); - -RGLayerIPAdapterListItem.displayName = 'RGLayerIPAdapterListItem'; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterImagePreview.tsx 
b/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterImagePreview.tsx deleted file mode 100644 index b3094e5599..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterImagePreview.tsx +++ /dev/null @@ -1,237 +0,0 @@ -import type { SystemStyleObject } from '@invoke-ai/ui-library'; -import { Box, Flex, Spinner, useShiftModifier } from '@invoke-ai/ui-library'; -import { skipToken } from '@reduxjs/toolkit/query'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAIDndImage from 'common/components/IAIDndImage'; -import IAIDndImageIcon from 'common/components/IAIDndImageIcon'; -import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice'; -import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage'; -import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage'; -import { useControlAdapterProcessorType } from 'features/controlAdapters/hooks/useControlAdapterProcessorType'; -import { - controlAdapterImageChanged, - selectControlAdaptersSlice, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice'; -import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types'; -import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; -import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; -import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { memo, useCallback, useEffect, useMemo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiArrowCounterClockwiseBold, PiFloppyDiskBold, PiRulerBold } from 
'react-icons/pi'; -import { - useAddImageToBoardMutation, - useChangeImageIsIntermediateMutation, - useGetImageDTOQuery, - useRemoveImageFromBoardMutation, -} from 'services/api/endpoints/images'; -import type { PostUploadAction } from 'services/api/types'; - -type Props = { - id: string; - isSmall?: boolean; -}; - -const selectPendingControlImages = createMemoizedSelector( - selectControlAdaptersSlice, - (controlAdapters) => controlAdapters.pendingControlImages -); - -const ControlAdapterImagePreview = ({ isSmall, id }: Props) => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const controlImageName = useControlAdapterControlImage(id); - const processedControlImageName = useControlAdapterProcessedControlImage(id); - const processorType = useControlAdapterProcessorType(id); - const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId); - const isConnected = useAppSelector((s) => s.system.isConnected); - const activeTabName = useAppSelector(activeTabNameSelector); - const optimalDimension = useAppSelector(selectOptimalDimension); - const pendingControlImages = useAppSelector(selectPendingControlImages); - const shift = useShiftModifier(); - - const [isMouseOverImage, setIsMouseOverImage] = useState(false); - - const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery( - controlImageName ?? skipToken - ); - - const { currentData: processedControlImage, isError: isErrorProcessedControlImage } = useGetImageDTOQuery( - processedControlImageName ?? 
skipToken - ); - - const [changeIsIntermediate] = useChangeImageIsIntermediateMutation(); - const [addToBoard] = useAddImageToBoardMutation(); - const [removeFromBoard] = useRemoveImageFromBoardMutation(); - const handleResetControlImage = useCallback(() => { - dispatch(controlAdapterImageChanged({ id, controlImage: null })); - }, [id, dispatch]); - - const handleSaveControlImage = useCallback(async () => { - if (!processedControlImage) { - return; - } - - await changeIsIntermediate({ - imageDTO: processedControlImage, - is_intermediate: false, - }).unwrap(); - - if (autoAddBoardId !== 'none') { - addToBoard({ - imageDTO: processedControlImage, - board_id: autoAddBoardId, - }); - } else { - removeFromBoard({ imageDTO: processedControlImage }); - } - }, [processedControlImage, changeIsIntermediate, autoAddBoardId, addToBoard, removeFromBoard]); - - const handleSetControlImageToDimensions = useCallback(() => { - if (!controlImage) { - return; - } - - if (activeTabName === 'unifiedCanvas') { - dispatch(setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension)); - } else { - if (shift) { - const { width, height } = controlImage; - dispatch(widthChanged({ width, updateAspectRatio: true })); - dispatch(heightChanged({ height, updateAspectRatio: true })); - } else { - const { width, height } = calculateNewSize( - controlImage.width / controlImage.height, - optimalDimension * optimalDimension - ); - dispatch(widthChanged({ width, updateAspectRatio: true })); - dispatch(heightChanged({ height, updateAspectRatio: true })); - } - } - }, [controlImage, activeTabName, dispatch, optimalDimension, shift]); - - const handleMouseEnter = useCallback(() => { - setIsMouseOverImage(true); - }, []); - - const handleMouseLeave = useCallback(() => { - setIsMouseOverImage(false); - }, []); - - const draggableData = useMemo(() => { - if (controlImage) { - return { - id, - payloadType: 'IMAGE_DTO', - payload: { imageDTO: controlImage }, - }; - } - 
}, [controlImage, id]); - - const droppableData = useMemo( - () => ({ - id, - actionType: 'SET_CONTROL_ADAPTER_IMAGE', - context: { id }, - }), - [id] - ); - - const postUploadAction = useMemo(() => ({ type: 'SET_CONTROL_ADAPTER_IMAGE', id }), [id]); - - const shouldShowProcessedImage = - controlImage && - processedControlImage && - !isMouseOverImage && - !pendingControlImages.includes(id) && - processorType !== 'none'; - - useEffect(() => { - if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) { - handleResetControlImage(); - } - }, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]); - - return ( - - - - - - - - <> - : undefined} - tooltip={t('controlnet.resetControlImage')} - /> - : undefined} - tooltip={t('controlnet.saveControlImage')} - styleOverrides={saveControlImageStyleOverrides} - /> - : undefined} - tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')} - styleOverrides={setControlImageDimensionsStyleOverrides} - /> - - - {pendingControlImages.includes(id) && ( - - - - )} - - ); -}; - -export default memo(ControlAdapterImagePreview); - -const saveControlImageStyleOverrides: SystemStyleObject = { mt: 6 }; -const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 12 }; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig.tsx b/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig.tsx deleted file mode 100644 index 29a3502d37..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig.tsx +++ /dev/null @@ -1,72 +0,0 @@ -import { Box, Flex, Icon, IconButton } from '@invoke-ai/ui-library'; -import ControlAdapterProcessorComponent from 'features/controlAdapters/components/ControlAdapterProcessorComponent'; -import 
ControlAdapterShouldAutoConfig from 'features/controlAdapters/components/ControlAdapterShouldAutoConfig'; -import ParamControlAdapterIPMethod from 'features/controlAdapters/components/parameters/ParamControlAdapterIPMethod'; -import ParamControlAdapterProcessorSelect from 'features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect'; -import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiCaretUpBold } from 'react-icons/pi'; -import { useToggle } from 'react-use'; - -import ControlAdapterImagePreview from './ControlAdapterImagePreview'; -import { ParamControlAdapterBeginEnd } from './ParamControlAdapterBeginEnd'; -import ParamControlAdapterControlMode from './ParamControlAdapterControlMode'; -import ParamControlAdapterModel from './ParamControlAdapterModel'; -import ParamControlAdapterWeight from './ParamControlAdapterWeight'; - -const ControlAdapterLayerConfig = (props: { id: string }) => { - const { id } = props; - const controlAdapterType = useControlAdapterType(id); - const { t } = useTranslation(); - const [isExpanded, toggleIsExpanded] = useToggle(false); - - return ( - - - - {' '} - - - {controlAdapterType !== 'ip_adapter' && ( - - } - /> - )} - - - - {controlAdapterType === 'ip_adapter' && } - {controlAdapterType === 'controlnet' && } - - - - - - - - {isExpanded && ( - <> - - - - - )} - - ); -}; - -export default memo(ControlAdapterLayerConfig); diff --git a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterBeginEnd.tsx b/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterBeginEnd.tsx deleted file mode 100644 index e4bc07e0b4..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterBeginEnd.tsx +++ /dev/null @@ -1,89 +0,0 @@ -import { 
CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; -import { useControlAdapterBeginEndStepPct } from 'features/controlAdapters/hooks/useControlAdapterBeginEndStepPct'; -import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; -import { - controlAdapterBeginStepPctChanged, - controlAdapterEndStepPctChanged, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { memo, useCallback, useMemo } from 'react'; -import { useTranslation } from 'react-i18next'; - -type Props = { - id: string; -}; - -const formatPct = (v: number) => `${Math.round(v * 100)}%`; - -export const ParamControlAdapterBeginEnd = memo(({ id }: Props) => { - const isEnabled = useControlAdapterIsEnabled(id); - const stepPcts = useControlAdapterBeginEndStepPct(id); - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const onChange = useCallback( - (v: [number, number]) => { - dispatch( - controlAdapterBeginStepPctChanged({ - id, - beginStepPct: v[0], - }) - ); - dispatch( - controlAdapterEndStepPctChanged({ - id, - endStepPct: v[1], - }) - ); - }, - [dispatch, id] - ); - - const onReset = useCallback(() => { - dispatch( - controlAdapterBeginStepPctChanged({ - id, - beginStepPct: 0, - }) - ); - dispatch( - controlAdapterEndStepPctChanged({ - id, - endStepPct: 1, - }) - ); - }, [dispatch, id]); - - const value = useMemo<[number, number]>(() => [stepPcts?.beginStepPct ?? 0, stepPcts?.endStepPct ?? 
1], [stepPcts]); - - if (!stepPcts) { - return null; - } - - return ( - - - {t('controlnet.beginEndStepPercentShort')} - - - - ); -}); - -ParamControlAdapterBeginEnd.displayName = 'ParamControlAdapterBeginEnd'; - -const ariaLabel = ['Begin Step %', 'End Step %']; diff --git a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterModel.tsx b/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterModel.tsx deleted file mode 100644 index 73a7d695b3..0000000000 --- a/invokeai/frontend/web/src/features/controlLayers/components/controlAdapterOverrides/ParamControlAdapterModel.tsx +++ /dev/null @@ -1,136 +0,0 @@ -import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; -import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; -import { useControlAdapterCLIPVisionModel } from 'features/controlAdapters/hooks/useControlAdapterCLIPVisionModel'; -import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; -import { useControlAdapterModel } from 'features/controlAdapters/hooks/useControlAdapterModel'; -import { useControlAdapterModels } from 'features/controlAdapters/hooks/useControlAdapterModels'; -import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType'; -import { - controlAdapterCLIPVisionModelChanged, - controlAdapterModelChanged, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import type { CLIPVisionModel } from 'features/controlAdapters/store/types'; -import { selectGenerationSlice } from 'features/parameters/store/generationSlice'; -import { memo, useCallback, useMemo } from 'react'; -import { useTranslation } 
from 'react-i18next'; -import type { - AnyModelConfig, - ControlNetModelConfig, - IPAdapterModelConfig, - T2IAdapterModelConfig, -} from 'services/api/types'; - -type ParamControlAdapterModelProps = { - id: string; -}; - -const selectMainModel = createMemoizedSelector(selectGenerationSlice, (generation) => generation.model); - -const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => { - const isEnabled = useControlAdapterIsEnabled(id); - const controlAdapterType = useControlAdapterType(id); - const { modelConfig } = useControlAdapterModel(id); - const dispatch = useAppDispatch(); - const currentBaseModel = useAppSelector((s) => s.generation.model?.base); - const currentCLIPVisionModel = useControlAdapterCLIPVisionModel(id); - const mainModel = useAppSelector(selectMainModel); - const { t } = useTranslation(); - - const [modelConfigs, { isLoading }] = useControlAdapterModels(controlAdapterType); - - const _onChange = useCallback( - (modelConfig: ControlNetModelConfig | IPAdapterModelConfig | T2IAdapterModelConfig | null) => { - if (!modelConfig) { - return; - } - dispatch( - controlAdapterModelChanged({ - id, - modelConfig, - }) - ); - }, - [dispatch, id] - ); - - const onCLIPVisionModelChange = useCallback( - (v) => { - if (!v?.value) { - return; - } - dispatch(controlAdapterCLIPVisionModelChanged({ id, clipVisionModel: v.value as CLIPVisionModel })); - }, - [dispatch, id] - ); - - const selectedModel = useMemo( - () => (modelConfig && controlAdapterType ? 
{ ...modelConfig, model_type: controlAdapterType } : null), - [controlAdapterType, modelConfig] - ); - - const getIsDisabled = useCallback( - (model: AnyModelConfig): boolean => { - const isCompatible = currentBaseModel === model.base; - const hasMainModel = Boolean(currentBaseModel); - return !hasMainModel || !isCompatible; - }, - [currentBaseModel] - ); - - const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({ - modelConfigs, - onChange: _onChange, - selectedModel, - getIsDisabled, - isLoading, - }); - - const clipVisionOptions = useMemo( - () => [ - { label: 'ViT-H', value: 'ViT-H' }, - { label: 'ViT-G', value: 'ViT-G' }, - ], - [] - ); - - const clipVisionModel = useMemo( - () => clipVisionOptions.find((o) => o.value === currentCLIPVisionModel), - [clipVisionOptions, currentCLIPVisionModel] - ); - - return ( - - - - - - - {modelConfig?.type === 'ip_adapter' && modelConfig.format === 'checkpoint' && ( - - - - )} - - ); -}; - -export default memo(ParamControlAdapterModel); diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts new file mode 100644 index 0000000000..17f0d4bf2d --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts @@ -0,0 +1,95 @@ +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { caLayerAdded, ipaLayerAdded, rgLayerIPAdapterAdded } from 'features/controlLayers/store/controlLayersSlice'; +import { + buildControlNet, + buildIPAdapter, + buildT2IAdapter, + CONTROLNET_PROCESSORS, + isProcessorType, +} from 'features/controlLayers/util/controlAdapters'; +import { zModelIdentifierField } from 'features/nodes/types/common'; +import { useCallback, useMemo } from 'react'; +import { useControlNetAndT2IAdapterModels, useIPAdapterModels } from 'services/api/hooks/modelsByType'; +import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } 
from 'services/api/types'; +import { v4 as uuidv4 } from 'uuid'; + +export const useAddCALayer = () => { + const dispatch = useAppDispatch(); + const baseModel = useAppSelector((s) => s.generation.model?.base); + const [modelConfigs] = useControlNetAndT2IAdapterModels(); + const model: ControlNetModelConfig | T2IAdapterModelConfig | null = useMemo(() => { + // prefer to use a model that matches the base model + const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true)); + return compatibleModels[0] ?? modelConfigs[0] ?? null; + }, [baseModel, modelConfigs]); + const isDisabled = useMemo(() => !model, [model]); + const addCALayer = useCallback(() => { + if (!model) { + return; + } + + const id = uuidv4(); + const defaultPreprocessor = model.default_settings?.preprocessor; + const processorConfig = isProcessorType(defaultPreprocessor) + ? CONTROLNET_PROCESSORS[defaultPreprocessor].buildDefaults(baseModel) + : null; + + const builder = model.type === 'controlnet' ? buildControlNet : buildT2IAdapter; + const controlAdapter = builder(id, { + model: zModelIdentifierField.parse(model), + processorConfig, + }); + + dispatch(caLayerAdded(controlAdapter)); + }, [dispatch, model, baseModel]); + + return [addCALayer, isDisabled] as const; +}; + +export const useAddIPALayer = () => { + const dispatch = useAppDispatch(); + const baseModel = useAppSelector((s) => s.generation.model?.base); + const [modelConfigs] = useIPAdapterModels(); + const model: IPAdapterModelConfig | null = useMemo(() => { + // prefer to use a model that matches the base model + const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true)); + return compatibleModels[0] ?? modelConfigs[0] ?? 
null; + }, [baseModel, modelConfigs]); + const isDisabled = useMemo(() => !model, [model]); + const addIPALayer = useCallback(() => { + if (!model) { + return; + } + const id = uuidv4(); + const ipAdapter = buildIPAdapter(id, { + model: zModelIdentifierField.parse(model), + }); + dispatch(ipaLayerAdded(ipAdapter)); + }, [dispatch, model]); + + return [addIPALayer, isDisabled] as const; +}; + +export const useAddIPAdapterToIPALayer = (layerId: string) => { + const dispatch = useAppDispatch(); + const baseModel = useAppSelector((s) => s.generation.model?.base); + const [modelConfigs] = useIPAdapterModels(); + const model: IPAdapterModelConfig | null = useMemo(() => { + // prefer to use a model that matches the base model + const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true)); + return compatibleModels[0] ?? modelConfigs[0] ?? null; + }, [baseModel, modelConfigs]); + const isDisabled = useMemo(() => !model, [model]); + const addIPAdapter = useCallback(() => { + if (!model) { + return; + } + const id = uuidv4(); + const ipAdapter = buildIPAdapter(id, { + model: zModelIdentifierField.parse(model), + }); + dispatch(rgLayerIPAdapterAdded({ layerId, ipAdapter })); + }, [dispatch, model, layerId]); + + return [addIPAdapter, isDisabled] as const; +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/mouseEventHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/mouseEventHooks.ts index bab7ef263f..e3e87d0c42 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/mouseEventHooks.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/mouseEventHooks.ts @@ -9,9 +9,9 @@ import { $lastMouseDownPos, $tool, brushSizeChanged, - maskLayerLineAdded, - maskLayerPointsAdded, - maskLayerRectAdded, + rgLayerLineAdded, + rgLayerPointsAdded, + rgLayerRectAdded, } from 'features/controlLayers/store/controlLayersSlice'; import type Konva from 'konva'; import type { KonvaEventObject } from 
'konva/lib/Node'; @@ -71,7 +71,7 @@ export const useMouseEvents = () => { } if (tool === 'brush' || tool === 'eraser') { dispatch( - maskLayerLineAdded({ + rgLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool, @@ -94,7 +94,7 @@ export const useMouseEvents = () => { const tool = $tool.get(); if (pos && lastPos && selectedLayerId && tool === 'rect') { dispatch( - maskLayerRectAdded({ + rgLayerRectAdded({ layerId: selectedLayerId, rect: { x: Math.min(pos.x, lastPos.x), @@ -128,7 +128,7 @@ export const useMouseEvents = () => { } } lastCursorPosRef.current = [pos.x, pos.y]; - dispatch(maskLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current })); + dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current })); } }, [dispatch, selectedLayerId, tool] @@ -149,7 +149,7 @@ export const useMouseEvents = () => { $isMouseDown.get() && (tool === 'brush' || tool === 'eraser') ) { - dispatch(maskLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] })); + dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] })); } $isMouseOver.set(false); $isMouseDown.set(false); @@ -181,7 +181,7 @@ export const useMouseEvents = () => { } if (tool === 'brush' || tool === 'eraser') { dispatch( - maskLayerLineAdded({ + rgLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool, diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/useControlLayersTitle.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/useControlLayersTitle.ts index 93c8bec8a6..c42a27f28f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/useControlLayersTitle.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/useControlLayersTitle.ts @@ -5,15 +5,12 @@ import { useMemo } from 'react'; import { useTranslation } from 'react-i18next'; const selectValidLayerCount = createSelector(selectControlLayersSlice, (controlLayers) => { - if 
(!controlLayers.present.isEnabled) { - return 0; - } const validLayers = controlLayers.present.layers .filter(isRegionalGuidanceLayer) .filter((l) => l.isEnabled) .filter((l) => { const hasTextPrompt = Boolean(l.positivePrompt || l.negativePrompt); - const hasAtLeastOneImagePrompt = l.ipAdapterIds.length > 0; + const hasAtLeastOneImagePrompt = l.ipAdapters.length > 0; return hasTextPrompt || hasAtLeastOneImagePrompt; }); diff --git a/invokeai/frontend/web/src/features/controlLayers/store/controlLayersSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/controlLayersSlice.ts index 6d351d4d0d..c27a98c826 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/controlLayersSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/controlLayersSlice.ts @@ -3,12 +3,22 @@ import { createSlice, isAnyOf } from '@reduxjs/toolkit'; import type { PersistConfig, RootState } from 'app/store/store'; import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils'; import { deepClone } from 'common/util/deepClone'; -import { roundToMultiple } from 'common/util/roundDownToMultiple'; +import type { + CLIPVisionModel, + ControlMode, + ControlNetConfig, + IPAdapterConfig, + IPMethod, + ProcessorConfig, + T2IAdapterConfig, +} from 'features/controlLayers/util/controlAdapters'; import { - controlAdapterImageChanged, - controlAdapterProcessedImageChanged, - isAnyControlAdapterAdded, -} from 'features/controlAdapters/store/controlAdaptersSlice'; + buildControlAdapterProcessor, + controlNetToT2IAdapter, + imageDTOToImageWithDims, + t2iAdapterToControlNet, +} from 'features/controlLayers/util/controlAdapters'; +import { zModelIdentifierField } from 'features/nodes/types/common'; import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize'; import { initialAspectRatioState } from 'features/parameters/components/ImageSize/constants'; import type { AspectRatioState } from 
'features/parameters/components/ImageSize/types'; @@ -20,6 +30,7 @@ import { isEqual, partition } from 'lodash-es'; import { atom } from 'nanostores'; import type { RgbColor } from 'react-colorful'; import type { UndoableOptions } from 'redux-undo'; +import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types'; import { assert } from 'tsafe'; import { v4 as uuidv4 } from 'uuid'; @@ -41,13 +52,11 @@ export const initialControlLayersState: ControlLayersState = { brushSize: 100, layers: [], globalMaskLayerOpacity: 0.3, // this globally changes all mask layers' opacity - isEnabled: true, positivePrompt: '', negativePrompt: '', positivePrompt2: '', negativePrompt2: '', shouldConcatPrompts: true, - initialImage: null, size: { width: 512, height: 512, @@ -72,14 +81,45 @@ const resetLayer = (layer: Layer) => { layer.bboxNeedsUpdate = false; return; } +}; - if (layer.type === 'control_adapter_layer') { - // TODO - } +export const selectCALayerOrThrow = (state: ControlLayersState, layerId: string): ControlAdapterLayer => { + const layer = state.layers.find((l) => l.id === layerId); + assert(isControlAdapterLayer(layer)); + return layer; +}; +export const selectIPALayerOrThrow = (state: ControlLayersState, layerId: string): IPAdapterLayer => { + const layer = state.layers.find((l) => l.id === layerId); + assert(isIPAdapterLayer(layer)); + return layer; +}; +const selectCAOrIPALayerOrThrow = ( + state: ControlLayersState, + layerId: string +): ControlAdapterLayer | IPAdapterLayer => { + const layer = state.layers.find((l) => l.id === layerId); + assert(isControlAdapterLayer(layer) || isIPAdapterLayer(layer)); + return layer; +}; +const selectRGLayerOrThrow = (state: ControlLayersState, layerId: string): RegionalGuidanceLayer => { + const layer = state.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer)); + return layer; +}; +export const selectRGLayerIPAdapterOrThrow = ( + state: 
ControlLayersState, + layerId: string, + ipAdapterId: string +): IPAdapterConfig => { + const layer = state.layers.find((l) => l.id === layerId); + assert(isRegionalGuidanceLayer(layer)); + const ipAdapter = layer.ipAdapters.find((ipAdapter) => ipAdapter.id === ipAdapterId); + assert(ipAdapter); + return ipAdapter; }; const getVectorMaskPreviewColor = (state: ControlLayersState): RgbColor => { - const vmLayers = state.layers.filter(isRegionalGuidanceLayer); - const lastColor = vmLayers[vmLayers.length - 1]?.previewColor; + const rgLayers = state.layers.filter(isRegionalGuidanceLayer); + const lastColor = rgLayers[rgLayers.length - 1]?.previewColor; return LayerColors.next(lastColor); }; @@ -87,71 +127,7 @@ export const controlLayersSlice = createSlice({ name: 'controlLayers', initialState: initialControlLayersState, reducers: { - //#region All Layers - regionalGuidanceLayerAdded: (state, action: PayloadAction<{ layerId: string }>) => { - const { layerId } = action.payload; - const layer: RegionalGuidanceLayer = { - id: getRegionalGuidanceLayerId(layerId), - type: 'regional_guidance_layer', - isEnabled: true, - bbox: null, - bboxNeedsUpdate: false, - maskObjects: [], - previewColor: getVectorMaskPreviewColor(state), - x: 0, - y: 0, - autoNegative: 'invert', - needsPixelBbox: false, - positivePrompt: '', - negativePrompt: null, - ipAdapterIds: [], - isSelected: true, - }; - state.layers.push(layer); - state.selectedLayerId = layer.id; - for (const layer of state.layers.filter(isRenderableLayer)) { - if (layer.id !== layerId) { - layer.isSelected = false; - } - } - return; - }, - ipAdapterLayerAdded: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => { - const { layerId, ipAdapterId } = action.payload; - const layer: IPAdapterLayer = { - id: getIPAdapterLayerId(layerId), - type: 'ip_adapter_layer', - isEnabled: true, - ipAdapterId, - }; - state.layers.push(layer); - return; - }, - controlAdapterLayerAdded: (state, action: PayloadAction<{ 
layerId: string; controlNetId: string }>) => { - const { layerId, controlNetId } = action.payload; - const layer: ControlAdapterLayer = { - id: getControlNetLayerId(layerId), - type: 'control_adapter_layer', - controlNetId, - x: 0, - y: 0, - bbox: null, - bboxNeedsUpdate: false, - isEnabled: true, - imageName: null, - opacity: 1, - isSelected: true, - isFilterEnabled: true, - }; - state.layers.push(layer); - state.selectedLayerId = layer.id; - for (const layer of state.layers.filter(isRenderableLayer)) { - if (layer.id !== layerId) { - layer.isSelected = false; - } - } - return; - }, + //#region Any Layer Type layerSelected: (state, action: PayloadAction) => { for (const layer of state.layers.filter(isRenderableLayer)) { if (layer.id === action.payload) { @@ -235,144 +211,397 @@ export const controlLayersSlice = createSlice({ state.layers = state.layers.filter((l) => l.id !== state.selectedLayerId); state.selectedLayerId = state.layers[0]?.id ?? null; }, - layerOpacityChanged: (state, action: PayloadAction<{ layerId: string; opacity: number }>) => { - const { layerId, opacity } = action.payload; - const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId); - if (layer) { - layer.opacity = opacity; - } + allLayersDeleted: (state) => { + state.layers = []; + state.selectedLayerId = null; }, //#endregion //#region CA Layers - isFilterEnabledChanged: (state, action: PayloadAction<{ layerId: string; isFilterEnabled: boolean }>) => { + caLayerAdded: { + reducer: ( + state, + action: PayloadAction<{ layerId: string; controlAdapter: ControlNetConfig | T2IAdapterConfig }> + ) => { + const { layerId, controlAdapter } = action.payload; + const layer: ControlAdapterLayer = { + id: getCALayerId(layerId), + type: 'control_adapter_layer', + x: 0, + y: 0, + bbox: null, + bboxNeedsUpdate: false, + isEnabled: true, + opacity: 1, + isSelected: true, + isFilterEnabled: true, + controlAdapter, + }; + state.layers.push(layer); + state.selectedLayerId = 
layer.id; + for (const layer of state.layers.filter(isRenderableLayer)) { + if (layer.id !== layerId) { + layer.isSelected = false; + } + } + }, + prepare: (controlAdapter: ControlNetConfig | T2IAdapterConfig) => ({ + payload: { layerId: uuidv4(), controlAdapter }, + }), + }, + caLayerImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => { + const { layerId, imageDTO } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + layer.bbox = null; + layer.bboxNeedsUpdate = true; + layer.isEnabled = true; + if (imageDTO) { + const newImage = imageDTOToImageWithDims(imageDTO); + if (isEqual(newImage, layer.controlAdapter.image)) { + return; + } + layer.controlAdapter.image = newImage; + layer.controlAdapter.processedImage = null; + } else { + layer.controlAdapter.image = null; + layer.controlAdapter.processedImage = null; + } + }, + caLayerProcessedImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => { + const { layerId, imageDTO } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + layer.bbox = null; + layer.bboxNeedsUpdate = true; + layer.isEnabled = true; + layer.controlAdapter.processedImage = imageDTO ? 
imageDTOToImageWithDims(imageDTO) : null; + }, + caLayerModelChanged: ( + state, + action: PayloadAction<{ + layerId: string; + modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | null; + }> + ) => { + const { layerId, modelConfig } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + if (!modelConfig) { + layer.controlAdapter.model = null; + return; + } + layer.controlAdapter.model = zModelIdentifierField.parse(modelConfig); + + // We may need to convert the CA to match the model + if (layer.controlAdapter.type === 't2i_adapter' && layer.controlAdapter.model.type === 'controlnet') { + layer.controlAdapter = t2iAdapterToControlNet(layer.controlAdapter); + } else if (layer.controlAdapter.type === 'controlnet' && layer.controlAdapter.model.type === 't2i_adapter') { + layer.controlAdapter = controlNetToT2IAdapter(layer.controlAdapter); + } + + const candidateProcessorConfig = buildControlAdapterProcessor(modelConfig); + if (candidateProcessorConfig?.type !== layer.controlAdapter.processorConfig?.type) { + // The processor has changed. For example, the previous model was a Canny model and the new model is a Depth + // model. We need to use the new processor. 
+ layer.controlAdapter.processedImage = null; + layer.controlAdapter.processorConfig = candidateProcessorConfig; + } + }, + caLayerControlModeChanged: (state, action: PayloadAction<{ layerId: string; controlMode: ControlMode }>) => { + const { layerId, controlMode } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + assert(layer.controlAdapter.type === 'controlnet'); + layer.controlAdapter.controlMode = controlMode; + }, + caLayerProcessorConfigChanged: ( + state, + action: PayloadAction<{ layerId: string; processorConfig: ProcessorConfig | null }> + ) => { + const { layerId, processorConfig } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + layer.controlAdapter.processorConfig = processorConfig; + if (!processorConfig) { + layer.controlAdapter.processedImage = null; + } + }, + caLayerIsFilterEnabledChanged: (state, action: PayloadAction<{ layerId: string; isFilterEnabled: boolean }>) => { const { layerId, isFilterEnabled } = action.payload; - const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId); - if (layer) { - layer.isFilterEnabled = isFilterEnabled; + const layer = selectCALayerOrThrow(state, layerId); + layer.isFilterEnabled = isFilterEnabled; + }, + caLayerOpacityChanged: (state, action: PayloadAction<{ layerId: string; opacity: number }>) => { + const { layerId, opacity } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + layer.opacity = opacity; + }, + caLayerIsProcessingImageChanged: ( + state, + action: PayloadAction<{ layerId: string; isProcessingImage: boolean }> + ) => { + const { layerId, isProcessingImage } = action.payload; + const layer = selectCALayerOrThrow(state, layerId); + layer.controlAdapter.isProcessingImage = isProcessingImage; + }, + //#endregion + + //#region IP Adapter Layers + ipaLayerAdded: { + reducer: (state, action: PayloadAction<{ layerId: string; ipAdapter: IPAdapterConfig }>) => { + const { layerId, ipAdapter } = 
action.payload; + const layer: IPAdapterLayer = { + id: getIPALayerId(layerId), + type: 'ip_adapter_layer', + isEnabled: true, + ipAdapter, + }; + state.layers.push(layer); + }, + prepare: (ipAdapter: IPAdapterConfig) => ({ payload: { layerId: uuidv4(), ipAdapter } }), + }, + ipaLayerImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => { + const { layerId, imageDTO } = action.payload; + const layer = selectIPALayerOrThrow(state, layerId); + layer.ipAdapter.image = imageDTO ? imageDTOToImageWithDims(imageDTO) : null; + }, + ipaLayerMethodChanged: (state, action: PayloadAction<{ layerId: string; method: IPMethod }>) => { + const { layerId, method } = action.payload; + const layer = selectIPALayerOrThrow(state, layerId); + layer.ipAdapter.method = method; + }, + ipaLayerModelChanged: ( + state, + action: PayloadAction<{ + layerId: string; + modelConfig: IPAdapterModelConfig | null; + }> + ) => { + const { layerId, modelConfig } = action.payload; + const layer = selectIPALayerOrThrow(state, layerId); + if (!modelConfig) { + layer.ipAdapter.model = null; + return; + } + layer.ipAdapter.model = zModelIdentifierField.parse(modelConfig); + }, + ipaLayerCLIPVisionModelChanged: ( + state, + action: PayloadAction<{ layerId: string; clipVisionModel: CLIPVisionModel }> + ) => { + const { layerId, clipVisionModel } = action.payload; + const layer = selectIPALayerOrThrow(state, layerId); + layer.ipAdapter.clipVisionModel = clipVisionModel; + }, + //#endregion + + //#region CA or IPA Layers + caOrIPALayerWeightChanged: (state, action: PayloadAction<{ layerId: string; weight: number }>) => { + const { layerId, weight } = action.payload; + const layer = selectCAOrIPALayerOrThrow(state, layerId); + if (layer.type === 'control_adapter_layer') { + layer.controlAdapter.weight = weight; + } else { + layer.ipAdapter.weight = weight; + } + }, + caOrIPALayerBeginEndStepPctChanged: ( + state, + action: PayloadAction<{ layerId: string; 
beginEndStepPct: [number, number] }> + ) => { + const { layerId, beginEndStepPct } = action.payload; + const layer = selectCAOrIPALayerOrThrow(state, layerId); + if (layer.type === 'control_adapter_layer') { + layer.controlAdapter.beginEndStepPct = beginEndStepPct; + } else { + layer.ipAdapter.beginEndStepPct = beginEndStepPct; } }, //#endregion - //#region Mask Layers - maskLayerPositivePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => { - const { layerId, prompt } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - layer.positivePrompt = prompt; - } - }, - maskLayerNegativePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => { - const { layerId, prompt } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - layer.negativePrompt = prompt; - } - }, - maskLayerIPAdapterAdded: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => { - const { layerId, ipAdapterId } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - layer.ipAdapterIds.push(ipAdapterId); - } - }, - maskLayerIPAdapterDeleted: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => { - const { layerId, ipAdapterId } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - layer.ipAdapterIds = layer.ipAdapterIds.filter((id) => id !== ipAdapterId); - } - }, - maskLayerPreviewColorChanged: (state, action: PayloadAction<{ layerId: string; color: RgbColor }>) => { - const { layerId, color } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - layer.previewColor = color; - } - }, - maskLayerLineAdded: { - 
reducer: ( - state, - action: PayloadAction< - { layerId: string; points: [number, number, number, number]; tool: DrawingTool }, - string, - { uuid: string } - > - ) => { - const { layerId, points, tool } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - const lineId = getRegionalGuidanceLayerLineId(layer.id, action.meta.uuid); - layer.maskObjects.push({ - type: 'vector_mask_line', - tool: tool, - id: lineId, - // Points must be offset by the layer's x and y coordinates - // TODO: Handle this in the event listener? - points: [points[0] - layer.x, points[1] - layer.y, points[2] - layer.x, points[3] - layer.y], - strokeWidth: state.brushSize, - }); - layer.bboxNeedsUpdate = true; - if (!layer.needsPixelBbox && tool === 'eraser') { - layer.needsPixelBbox = true; + //#region RG Layers + rgLayerAdded: { + reducer: (state, action: PayloadAction<{ layerId: string }>) => { + const { layerId } = action.payload; + const layer: RegionalGuidanceLayer = { + id: getRGLayerId(layerId), + type: 'regional_guidance_layer', + isEnabled: true, + bbox: null, + bboxNeedsUpdate: false, + maskObjects: [], + previewColor: getVectorMaskPreviewColor(state), + x: 0, + y: 0, + autoNegative: 'invert', + needsPixelBbox: false, + positivePrompt: '', + negativePrompt: null, + ipAdapters: [], + isSelected: true, + }; + state.layers.push(layer); + state.selectedLayerId = layer.id; + for (const layer of state.layers.filter(isRenderableLayer)) { + if (layer.id !== layerId) { + layer.isSelected = false; } } }, + prepare: () => ({ payload: { layerId: uuidv4() } }), + }, + rgLayerPositivePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => { + const { layerId, prompt } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + layer.positivePrompt = prompt; + }, + rgLayerNegativePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => 
{ + const { layerId, prompt } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + layer.negativePrompt = prompt; + }, + rgLayerPreviewColorChanged: (state, action: PayloadAction<{ layerId: string; color: RgbColor }>) => { + const { layerId, color } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + layer.previewColor = color; + }, + rgLayerLineAdded: { + reducer: ( + state, + action: PayloadAction<{ + layerId: string; + points: [number, number, number, number]; + tool: DrawingTool; + lineUuid: string; + }> + ) => { + const { layerId, points, tool, lineUuid } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + const lineId = getRGLayerLineId(layer.id, lineUuid); + layer.maskObjects.push({ + type: 'vector_mask_line', + tool: tool, + id: lineId, + // Points must be offset by the layer's x and y coordinates + // TODO: Handle this in the event listener? + points: [points[0] - layer.x, points[1] - layer.y, points[2] - layer.x, points[3] - layer.y], + strokeWidth: state.brushSize, + }); + layer.bboxNeedsUpdate = true; + if (!layer.needsPixelBbox && tool === 'eraser') { + layer.needsPixelBbox = true; + } + }, prepare: (payload: { layerId: string; points: [number, number, number, number]; tool: DrawingTool }) => ({ - payload, - meta: { uuid: uuidv4() }, + payload: { ...payload, lineUuid: uuidv4() }, }), }, - maskLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => { + rgLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => { const { layerId, point } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - const lastLine = layer.maskObjects.findLast(isLine); - if (!lastLine) { - return; - } - // Points must be offset by the layer's x and y coordinates - // TODO: Handle this in the event listener - lastLine.points.push(point[0] - layer.x, point[1] - 
layer.y); - layer.bboxNeedsUpdate = true; + const layer = selectRGLayerOrThrow(state, layerId); + const lastLine = layer.maskObjects.findLast(isLine); + if (!lastLine) { + return; } + // Points must be offset by the layer's x and y coordinates + // TODO: Handle this in the event listener + lastLine.points.push(point[0] - layer.x, point[1] - layer.y); + layer.bboxNeedsUpdate = true; }, - maskLayerRectAdded: { - reducer: (state, action: PayloadAction<{ layerId: string; rect: IRect }, string, { uuid: string }>) => { - const { layerId, rect } = action.payload; + rgLayerRectAdded: { + reducer: (state, action: PayloadAction<{ layerId: string; rect: IRect; rectUuid: string }>) => { + const { layerId, rect, rectUuid } = action.payload; if (rect.height === 0 || rect.width === 0) { // Ignore zero-area rectangles return; } - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 'regional_guidance_layer') { - const id = getMaskedGuidnaceLayerRectId(layer.id, action.meta.uuid); - layer.maskObjects.push({ - type: 'vector_mask_rect', - id, - x: rect.x - layer.x, - y: rect.y - layer.y, - width: rect.width, - height: rect.height, - }); - layer.bboxNeedsUpdate = true; - } + const layer = selectRGLayerOrThrow(state, layerId); + const id = getRGLayerRectId(layer.id, rectUuid); + layer.maskObjects.push({ + type: 'vector_mask_rect', + id, + x: rect.x - layer.x, + y: rect.y - layer.y, + width: rect.width, + height: rect.height, + }); + layer.bboxNeedsUpdate = true; }, - prepare: (payload: { layerId: string; rect: IRect }) => ({ payload, meta: { uuid: uuidv4() } }), + prepare: (payload: { layerId: string; rect: IRect }) => ({ payload: { ...payload, rectUuid: uuidv4() } }), }, - maskLayerAutoNegativeChanged: ( + rgLayerAutoNegativeChanged: ( state, action: PayloadAction<{ layerId: string; autoNegative: ParameterAutoNegative }> ) => { const { layerId, autoNegative } = action.payload; - const layer = state.layers.find((l) => l.id === layerId); - if (layer?.type === 
'regional_guidance_layer') { - layer.autoNegative = autoNegative; + const layer = selectRGLayerOrThrow(state, layerId); + layer.autoNegative = autoNegative; + }, + rgLayerIPAdapterAdded: (state, action: PayloadAction<{ layerId: string; ipAdapter: IPAdapterConfig }>) => { + const { layerId, ipAdapter } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + layer.ipAdapters.push(ipAdapter); + }, + rgLayerIPAdapterDeleted: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => { + const { layerId, ipAdapterId } = action.payload; + const layer = selectRGLayerOrThrow(state, layerId); + layer.ipAdapters = layer.ipAdapters.filter((ipAdapter) => ipAdapter.id !== ipAdapterId); + }, + rgLayerIPAdapterImageChanged: ( + state, + action: PayloadAction<{ layerId: string; ipAdapterId: string; imageDTO: ImageDTO | null }> + ) => { + const { layerId, ipAdapterId, imageDTO } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + ipAdapter.image = imageDTO ? 
imageDTOToImageWithDims(imageDTO) : null; + }, + rgLayerIPAdapterWeightChanged: ( + state, + action: PayloadAction<{ layerId: string; ipAdapterId: string; weight: number }> + ) => { + const { layerId, ipAdapterId, weight } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + ipAdapter.weight = weight; + }, + rgLayerIPAdapterBeginEndStepPctChanged: ( + state, + action: PayloadAction<{ layerId: string; ipAdapterId: string; beginEndStepPct: [number, number] }> + ) => { + const { layerId, ipAdapterId, beginEndStepPct } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + ipAdapter.beginEndStepPct = beginEndStepPct; + }, + rgLayerIPAdapterMethodChanged: ( + state, + action: PayloadAction<{ layerId: string; ipAdapterId: string; method: IPMethod }> + ) => { + const { layerId, ipAdapterId, method } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + ipAdapter.method = method; + }, + rgLayerIPAdapterModelChanged: ( + state, + action: PayloadAction<{ + layerId: string; + ipAdapterId: string; + modelConfig: IPAdapterModelConfig | null; + }> + ) => { + const { layerId, ipAdapterId, modelConfig } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + if (!modelConfig) { + ipAdapter.model = null; + return; } + ipAdapter.model = zModelIdentifierField.parse(modelConfig); + }, + rgLayerIPAdapterCLIPVisionModelChanged: ( + state, + action: PayloadAction<{ layerId: string; ipAdapterId: string; clipVisionModel: CLIPVisionModel }> + ) => { + const { layerId, ipAdapterId, clipVisionModel } = action.payload; + const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId); + ipAdapter.clipVisionModel = clipVisionModel; }, //#endregion - //#region Base Layer + //#region Globals positivePromptChanged: (state, action: PayloadAction) => { state.positivePrompt = action.payload; }, @@ -409,18 
+638,12 @@ export const controlLayersSlice = createSlice({ aspectRatioChanged: (state, action: PayloadAction) => { state.size.aspectRatio = action.payload; }, - //#endregion - - //#region General brushSizeChanged: (state, action: PayloadAction) => { state.brushSize = Math.round(action.payload); }, globalMaskLayerOpacityChanged: (state, action: PayloadAction) => { state.globalMaskLayerOpacity = action.payload; }, - isEnabledChanged: (state, action: PayloadAction) => { - state.isEnabled = action.payload; - }, undo: (state) => { // Invalidate the bbox for all layers to prevent stale bboxes for (const layer of state.layers.filter(isRenderableLayer)) { @@ -451,36 +674,14 @@ export const controlLayersSlice = createSlice({ state.size.height = height; }); - builder.addCase(controlAdapterImageChanged, (state, action) => { - const { id, controlImage } = action.payload; - const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.controlNetId === id); - if (layer) { - layer.bbox = null; - layer.bboxNeedsUpdate = true; - layer.isEnabled = true; - layer.imageName = controlImage?.image_name ?? null; - } - }); - - builder.addCase(controlAdapterProcessedImageChanged, (state, action) => { - const { id, processedControlImage } = action.payload; - const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.controlNetId === id); - if (layer) { - layer.bbox = null; - layer.bboxNeedsUpdate = true; - layer.isEnabled = true; - layer.imageName = processedControlImage?.image_name ?? null; - } - }); - - // TODO: This is a temp fix to reduce issues with T2I adapter having a different downscaling - // factor than the UNet. Hopefully we get an upstream fix in diffusers. 
- builder.addMatcher(isAnyControlAdapterAdded, (state, action) => { - if (action.payload.type === 't2i_adapter') { - state.size.width = roundToMultiple(state.size.width, 64); - state.size.height = roundToMultiple(state.size.height, 64); - } - }); + // // TODO: This is a temp fix to reduce issues with T2I adapter having a different downscaling + // // factor than the UNet. Hopefully we get an upstream fix in diffusers. + // builder.addMatcher(isAnyControlAdapterAdded, (state, action) => { + // if (action.payload.type === 't2i_adapter') { + // state.size.width = roundToMultiple(state.size.width, 64); + // state.size.height = roundToMultiple(state.size.height, 64); + // } + // }); }, }); @@ -516,36 +717,57 @@ class LayerColors { } export const { - // All layer actions - layerDeleted, - layerMovedBackward, - layerMovedForward, - layerMovedToBack, - layerMovedToFront, - layerReset, + // Any Layer Type layerSelected, + layerVisibilityToggled, layerTranslated, layerBboxChanged, - layerVisibilityToggled, + layerReset, + layerDeleted, + layerMovedForward, + layerMovedToFront, + layerMovedBackward, + layerMovedToBack, selectedLayerReset, selectedLayerDeleted, - regionalGuidanceLayerAdded, - ipAdapterLayerAdded, - controlAdapterLayerAdded, - layerOpacityChanged, - // CA layer actions - isFilterEnabledChanged, - // Mask layer actions - maskLayerLineAdded, - maskLayerPointsAdded, - maskLayerRectAdded, - maskLayerNegativePromptChanged, - maskLayerPositivePromptChanged, - maskLayerIPAdapterAdded, - maskLayerIPAdapterDeleted, - maskLayerAutoNegativeChanged, - maskLayerPreviewColorChanged, - // Base layer actions + allLayersDeleted, + // CA Layers + caLayerAdded, + caLayerImageChanged, + caLayerProcessedImageChanged, + caLayerModelChanged, + caLayerControlModeChanged, + caLayerProcessorConfigChanged, + caLayerIsFilterEnabledChanged, + caLayerOpacityChanged, + caLayerIsProcessingImageChanged, + // IPA Layers + ipaLayerAdded, + ipaLayerImageChanged, + ipaLayerMethodChanged, + 
ipaLayerModelChanged, + ipaLayerCLIPVisionModelChanged, + // CA or IPA Layers + caOrIPALayerWeightChanged, + caOrIPALayerBeginEndStepPctChanged, + // RG Layers + rgLayerAdded, + rgLayerPositivePromptChanged, + rgLayerNegativePromptChanged, + rgLayerPreviewColorChanged, + rgLayerLineAdded, + rgLayerPointsAdded, + rgLayerRectAdded, + rgLayerAutoNegativeChanged, + rgLayerIPAdapterAdded, + rgLayerIPAdapterDeleted, + rgLayerIPAdapterImageChanged, + rgLayerIPAdapterWeightChanged, + rgLayerIPAdapterBeginEndStepPctChanged, + rgLayerIPAdapterMethodChanged, + rgLayerIPAdapterModelChanged, + rgLayerIPAdapterCLIPVisionModelChanged, + // Globals positivePromptChanged, negativePromptChanged, positivePrompt2Changed, @@ -554,27 +776,12 @@ export const { widthChanged, heightChanged, aspectRatioChanged, - // General actions brushSizeChanged, globalMaskLayerOpacityChanged, undo, redo, } = controlLayersSlice.actions; -export const selectAllControlAdapterIds = (controlLayers: ControlLayersState) => - controlLayers.layers.flatMap((l) => { - if (l.type === 'control_adapter_layer') { - return [l.controlNetId]; - } - if (l.type === 'ip_adapter_layer') { - return [l.ipAdapterId]; - } - if (l.type === 'regional_guidance_layer') { - return l.ipAdapterIds; - } - return []; - }); - export const selectControlLayersSlice = (state: RootState) => state.controlLayers; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ @@ -600,24 +807,23 @@ export const BACKGROUND_RECT_ID = 'background_layer.rect'; export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message'; // Names (aka classes) for Konva layers and objects -export const CONTROLNET_LAYER_NAME = 'control_adapter_layer'; -export const CONTROLNET_LAYER_IMAGE_NAME = 'control_adapter_layer.image'; -export const regional_guidance_layer_NAME = 'regional_guidance_layer'; -export const regional_guidance_layer_LINE_NAME = 'regional_guidance_layer.line'; -export const regional_guidance_layer_OBJECT_GROUP_NAME = 
'regional_guidance_layer.object_group'; -export const regional_guidance_layer_RECT_NAME = 'regional_guidance_layer.rect'; +export const CA_LAYER_NAME = 'control_adapter_layer'; +export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image'; +export const RG_LAYER_NAME = 'regional_guidance_layer'; +export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line'; +export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group'; +export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect'; export const LAYER_BBOX_NAME = 'layer.bbox'; // Getters for non-singleton layer and object IDs -const getRegionalGuidanceLayerId = (layerId: string) => `${regional_guidance_layer_NAME}_${layerId}`; -const getRegionalGuidanceLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`; -const getMaskedGuidnaceLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`; -export const getRegionalGuidanceLayerObjectGroupId = (layerId: string, groupId: string) => - `${layerId}.objectGroup_${groupId}`; +const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`; +const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`; +const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`; +export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`; export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`; -const getControlNetLayerId = (layerId: string) => `control_adapter_layer_${layerId}`; -export const getControlNetLayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`; -const getIPAdapterLayerId = (layerId: string) => `ip_adapter_layer_${layerId}`; +const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`; +export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`; +const getIPALayerId = 
(layerId: string) => `ip_adapter_layer_${layerId}`; export const controlLayersPersistConfig: PersistConfig = { name: controlLayersSlice.name, @@ -631,9 +837,13 @@ const undoableGroupByMatcher = isAnyOf( layerTranslated, brushSizeChanged, globalMaskLayerOpacityChanged, - maskLayerPositivePromptChanged, - maskLayerNegativePromptChanged, - maskLayerPreviewColorChanged + positivePromptChanged, + negativePromptChanged, + positivePrompt2Changed, + negativePrompt2Changed, + rgLayerPositivePromptChanged, + rgLayerNegativePromptChanged, + rgLayerPreviewColorChanged ); // These are used to group actions into logical lines below (hate typos) @@ -645,13 +855,13 @@ export const controlLayersUndoableConfig: UndoableOptions { - // Lines are started with `maskLayerLineAdded` and may have any number of subsequent `maskLayerPointsAdded` events. + // Lines are started with `rgLayerLineAdded` and may have any number of subsequent `rgLayerPointsAdded` events. // We can use a double-buffer-esque trick to group each "logical" line as a single undoable action, without grouping // separate logical lines as a single undo action. - if (maskLayerLineAdded.match(action)) { + if (rgLayerLineAdded.match(action)) { return history.group === LINE_1 ? 
LINE_2 : LINE_1; } - if (maskLayerPointsAdded.match(action)) { + if (rgLayerPointsAdded.match(action)) { if (history.group === LINE_1 || history.group === LINE_2) { return history.group; } diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 58b25f967b..a4d88f3a0a 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -1,3 +1,4 @@ +import type { ControlNetConfig, IPAdapterConfig, T2IAdapterConfig } from 'features/controlLayers/util/controlAdapters'; import type { AspectRatioState } from 'features/parameters/components/ImageSize/types'; import type { ParameterAutoNegative, @@ -47,15 +48,14 @@ type RenderableLayerBase = LayerBase & { export type ControlAdapterLayer = RenderableLayerBase & { type: 'control_adapter_layer'; // technically, also t2i adapter layer - controlNetId: string; - imageName: string | null; opacity: number; isFilterEnabled: boolean; + controlAdapter: ControlNetConfig | T2IAdapterConfig; }; export type IPAdapterLayer = LayerBase & { - type: 'ip_adapter_layer'; // technically, also t2i adapter layer - ipAdapterId: string; + type: 'ip_adapter_layer'; + ipAdapter: IPAdapterConfig; }; export type RegionalGuidanceLayer = RenderableLayerBase & { @@ -63,7 +63,7 @@ export type RegionalGuidanceLayer = RenderableLayerBase & { maskObjects: (VectorMaskLine | VectorMaskRect)[]; positivePrompt: ParameterPositivePrompt | null; negativePrompt: ParameterNegativePrompt | null; // Up to one text prompt per mask - ipAdapterIds: string[]; // Any number of image prompts + ipAdapters: IPAdapterConfig[]; // Any number of image prompts previewColor: RgbColor; autoNegative: ParameterAutoNegative; needsPixelBbox: boolean; // Needs the slower pixel-based bbox calculation - set to true when an there is an eraser object @@ -77,13 +77,11 @@ export type ControlLayersState = { layers: Layer[]; 
brushSize: number; globalMaskLayerOpacity: number; - isEnabled: boolean; positivePrompt: ParameterPositivePrompt; negativePrompt: ParameterNegativePrompt; positivePrompt2: ParameterPositiveStylePromptSDXL; negativePrompt2: ParameterNegativeStylePromptSDXL; shouldConcatPrompts: boolean; - initialImage: string | null; size: { width: ParameterWidth; height: ParameterHeight; diff --git a/invokeai/frontend/web/src/features/controlLayers/util/bbox.ts b/invokeai/frontend/web/src/features/controlLayers/util/bbox.ts index 3c2915e0ab..a4c7be6886 100644 --- a/invokeai/frontend/web/src/features/controlLayers/util/bbox.ts +++ b/invokeai/frontend/web/src/features/controlLayers/util/bbox.ts @@ -1,6 +1,6 @@ import openBase64ImageInTab from 'common/util/openBase64ImageInTab'; import { imageDataToDataURL } from 'features/canvas/util/blobToDataURL'; -import { regional_guidance_layer_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice'; +import { RG_LAYER_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice'; import Konva from 'konva'; import type { Layer as KonvaLayerType } from 'konva/lib/Layer'; import type { IRect } from 'konva/lib/types'; @@ -81,7 +81,7 @@ export const getLayerBboxPixels = (layer: KonvaLayerType, preview: boolean = fal offscreenStage.add(layerClone); for (const child of layerClone.getChildren()) { - if (child.name() === regional_guidance_layer_OBJECT_GROUP_NAME) { + if (child.name() === RG_LAYER_OBJECT_GROUP_NAME) { // We need to cache the group to ensure it composites out eraser strokes correctly child.opacity(1); child.cache(); diff --git a/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.test.ts b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.test.ts new file mode 100644 index 0000000000..656b759faa --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.test.ts @@ -0,0 +1,23 @@ +import type { S } from 'services/api/types'; +import type { 
Equals } from 'tsafe'; +import { assert } from 'tsafe'; +import { describe, test } from 'vitest'; + +import type { + CLIPVisionModel, + ControlMode, + DepthAnythingModelSize, + IPMethod, + ProcessorConfig, + ProcessorType, +} from './controlAdapters'; + +describe('Control Adapter Types', () => { + test('ProcessorType', () => assert>()); + test('IP Adapter Method', () => assert, IPMethod>>()); + test('CLIP Vision Model', () => + assert, CLIPVisionModel>>()); + test('Control Mode', () => assert, ControlMode>>()); + test('DepthAnything Model Size', () => + assert, DepthAnythingModelSize>>()); +}); diff --git a/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts new file mode 100644 index 0000000000..6cedc81a0b --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/util/controlAdapters.ts @@ -0,0 +1,483 @@ +import { deepClone } from 'common/util/deepClone'; +import type { + ParameterControlNetModel, + ParameterIPAdapterModel, + ParameterT2IAdapterModel, +} from 'features/parameters/types/parameterSchemas'; +import { merge, omit } from 'lodash-es'; +import type { + BaseModelType, + CannyImageProcessorInvocation, + ColorMapImageProcessorInvocation, + ContentShuffleImageProcessorInvocation, + ControlNetModelConfig, + DepthAnythingImageProcessorInvocation, + DWOpenposeImageProcessorInvocation, + Graph, + HedImageProcessorInvocation, + ImageDTO, + LineartAnimeImageProcessorInvocation, + LineartImageProcessorInvocation, + MediapipeFaceProcessorInvocation, + MidasDepthImageProcessorInvocation, + MlsdImageProcessorInvocation, + NormalbaeImageProcessorInvocation, + PidiImageProcessorInvocation, + T2IAdapterModelConfig, + ZoeDepthImageProcessorInvocation, +} from 'services/api/types'; +import { z } from 'zod'; + +const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']); +export type DepthAnythingModelSize = z.infer; +export const isDepthAnythingModelSize = (v: 
unknown): v is DepthAnythingModelSize => + zDepthAnythingModelSize.safeParse(v).success; + +export type CannyProcessorConfig = Required< + Pick +>; +export type ColorMapProcessorConfig = Required< + Pick +>; +export type ContentShuffleProcessorConfig = Required< + Pick +>; +export type DepthAnythingProcessorConfig = Required< + Pick +>; +export type HedProcessorConfig = Required>; +type LineartAnimeProcessorConfig = Required>; +export type LineartProcessorConfig = Required>; +export type MediapipeFaceProcessorConfig = Required< + Pick +>; +export type MidasDepthProcessorConfig = Required< + Pick +>; +export type MlsdProcessorConfig = Required>; +type NormalbaeProcessorConfig = Required>; +export type DWOpenposeProcessorConfig = Required< + Pick +>; +export type PidiProcessorConfig = Required>; +type ZoeDepthProcessorConfig = Required>; + +export type ProcessorConfig = + | CannyProcessorConfig + | ColorMapProcessorConfig + | ContentShuffleProcessorConfig + | DepthAnythingProcessorConfig + | HedProcessorConfig + | LineartAnimeProcessorConfig + | LineartProcessorConfig + | MediapipeFaceProcessorConfig + | MidasDepthProcessorConfig + | MlsdProcessorConfig + | NormalbaeProcessorConfig + | DWOpenposeProcessorConfig + | PidiProcessorConfig + | ZoeDepthProcessorConfig; + +export type ImageWithDims = { + imageName: string; + width: number; + height: number; +}; + +type ControlAdapterBase = { + id: string; + weight: number; + image: ImageWithDims | null; + processedImage: ImageWithDims | null; + isProcessingImage: boolean; + processorConfig: ProcessorConfig | null; + beginEndStepPct: [number, number]; +}; + +const zControlMode = z.enum(['balanced', 'more_prompt', 'more_control', 'unbalanced']); +export type ControlMode = z.infer; +export const isControlMode = (v: unknown): v is ControlMode => zControlMode.safeParse(v).success; + +export type ControlNetConfig = ControlAdapterBase & { + type: 'controlnet'; + model: ParameterControlNetModel | null; + controlMode: ControlMode; 
+}; +export const isControlNetConfig = (ca: ControlNetConfig | T2IAdapterConfig): ca is ControlNetConfig => + ca.type === 'controlnet'; + +export type T2IAdapterConfig = ControlAdapterBase & { + type: 't2i_adapter'; + model: ParameterT2IAdapterModel | null; +}; +export const isT2IAdapterConfig = (ca: ControlNetConfig | T2IAdapterConfig): ca is T2IAdapterConfig => + ca.type === 't2i_adapter'; + +const zCLIPVisionModel = z.enum(['ViT-H', 'ViT-G']); +export type CLIPVisionModel = z.infer; +export const isCLIPVisionModel = (v: unknown): v is CLIPVisionModel => zCLIPVisionModel.safeParse(v).success; + +const zIPMethod = z.enum(['full', 'style', 'composition']); +export type IPMethod = z.infer; +export const isIPMethod = (v: unknown): v is IPMethod => zIPMethod.safeParse(v).success; + +export type IPAdapterConfig = { + id: string; + type: 'ip_adapter'; + weight: number; + method: IPMethod; + image: ImageWithDims | null; + model: ParameterIPAdapterModel | null; + clipVisionModel: CLIPVisionModel; + beginEndStepPct: [number, number]; +}; + +const zProcessorType = z.enum([ + 'canny_image_processor', + 'color_map_image_processor', + 'content_shuffle_image_processor', + 'depth_anything_image_processor', + 'hed_image_processor', + 'lineart_anime_image_processor', + 'lineart_image_processor', + 'mediapipe_face_processor', + 'midas_depth_image_processor', + 'mlsd_image_processor', + 'normalbae_image_processor', + 'dw_openpose_image_processor', + 'pidi_image_processor', + 'zoe_depth_image_processor', +]); +export type ProcessorType = z.infer; +export const isProcessorType = (v: unknown): v is ProcessorType => zProcessorType.safeParse(v).success; + +type ProcessorData = { + type: T; + labelTKey: string; + descriptionTKey: string; + buildDefaults(baseModel?: BaseModelType): Extract; + buildNode( + image: ImageWithDims, + config: Extract + ): Extract; +}; + +const minDim = (image: ImageWithDims): number => Math.min(image.width, image.height); + +type CAProcessorsData = { + [key in 
ProcessorType]: ProcessorData; +}; +/** + * A dict of ControlNet processors, including: + * - label translation key + * - description translation key + * - a builder to create default values for the config + * - a builder to create the node for the config + * + * TODO: Generate from the OpenAPI schema + */ +export const CONTROLNET_PROCESSORS: CAProcessorsData = { + canny_image_processor: { + type: 'canny_image_processor', + labelTKey: 'controlnet.canny', + descriptionTKey: 'controlnet.cannyDescription', + buildDefaults: () => ({ + id: 'canny_image_processor', + type: 'canny_image_processor', + low_threshold: 100, + high_threshold: 200, + }), + buildNode: (image, config) => ({ + ...config, + type: 'canny_image_processor', + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + color_map_image_processor: { + type: 'color_map_image_processor', + labelTKey: 'controlnet.colorMap', + descriptionTKey: 'controlnet.colorMapDescription', + buildDefaults: () => ({ + id: 'color_map_image_processor', + type: 'color_map_image_processor', + color_map_tile_size: 64, + }), + buildNode: (image, config) => ({ + ...config, + type: 'color_map_image_processor', + image: { image_name: image.imageName }, + }), + }, + content_shuffle_image_processor: { + type: 'content_shuffle_image_processor', + labelTKey: 'controlnet.contentShuffle', + descriptionTKey: 'controlnet.contentShuffleDescription', + buildDefaults: (baseModel) => ({ + id: 'content_shuffle_image_processor', + type: 'content_shuffle_image_processor', + h: baseModel === 'sdxl' ? 1024 : 512, + w: baseModel === 'sdxl' ? 1024 : 512, + f: baseModel === 'sdxl' ? 
512 : 256, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + depth_anything_image_processor: { + type: 'depth_anything_image_processor', + labelTKey: 'controlnet.depthAnything', + descriptionTKey: 'controlnet.depthAnythingDescription', + buildDefaults: () => ({ + id: 'depth_anything_image_processor', + type: 'depth_anything_image_processor', + model_size: 'small', + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + resolution: minDim(image), + }), + }, + hed_image_processor: { + type: 'hed_image_processor', + labelTKey: 'controlnet.hed', + descriptionTKey: 'controlnet.hedDescription', + buildDefaults: () => ({ + id: 'hed_image_processor', + type: 'hed_image_processor', + scribble: false, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + lineart_anime_image_processor: { + type: 'lineart_anime_image_processor', + labelTKey: 'controlnet.lineartAnime', + descriptionTKey: 'controlnet.lineartAnimeDescription', + buildDefaults: () => ({ + id: 'lineart_anime_image_processor', + type: 'lineart_anime_image_processor', + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + lineart_image_processor: { + type: 'lineart_image_processor', + labelTKey: 'controlnet.lineart', + descriptionTKey: 'controlnet.lineartDescription', + buildDefaults: () => ({ + id: 'lineart_image_processor', + type: 'lineart_image_processor', + coarse: false, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + mediapipe_face_processor: { + type: 
'mediapipe_face_processor', + labelTKey: 'controlnet.mediapipeFace', + descriptionTKey: 'controlnet.mediapipeFaceDescription', + buildDefaults: () => ({ + id: 'mediapipe_face_processor', + type: 'mediapipe_face_processor', + max_faces: 1, + min_confidence: 0.5, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + midas_depth_image_processor: { + type: 'midas_depth_image_processor', + labelTKey: 'controlnet.depthMidas', + descriptionTKey: 'controlnet.depthMidasDescription', + buildDefaults: () => ({ + id: 'midas_depth_image_processor', + type: 'midas_depth_image_processor', + a_mult: 2, + bg_th: 0.1, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + mlsd_image_processor: { + type: 'mlsd_image_processor', + labelTKey: 'controlnet.mlsd', + descriptionTKey: 'controlnet.mlsdDescription', + buildDefaults: () => ({ + id: 'mlsd_image_processor', + type: 'mlsd_image_processor', + thr_d: 0.1, + thr_v: 0.1, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + normalbae_image_processor: { + type: 'normalbae_image_processor', + labelTKey: 'controlnet.normalBae', + descriptionTKey: 'controlnet.normalBaeDescription', + buildDefaults: () => ({ + id: 'normalbae_image_processor', + type: 'normalbae_image_processor', + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + dw_openpose_image_processor: { + type: 'dw_openpose_image_processor', + labelTKey: 'controlnet.dwOpenpose', + descriptionTKey: 'controlnet.dwOpenposeDescription', + buildDefaults: () => ({ + id: 
'dw_openpose_image_processor', + type: 'dw_openpose_image_processor', + draw_body: true, + draw_face: false, + draw_hands: false, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + image_resolution: minDim(image), + }), + }, + pidi_image_processor: { + type: 'pidi_image_processor', + labelTKey: 'controlnet.pidi', + descriptionTKey: 'controlnet.pidiDescription', + buildDefaults: () => ({ + id: 'pidi_image_processor', + type: 'pidi_image_processor', + scribble: false, + safe: false, + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + detect_resolution: minDim(image), + image_resolution: minDim(image), + }), + }, + zoe_depth_image_processor: { + type: 'zoe_depth_image_processor', + labelTKey: 'controlnet.depthZoe', + descriptionTKey: 'controlnet.depthZoeDescription', + buildDefaults: () => ({ + id: 'zoe_depth_image_processor', + type: 'zoe_depth_image_processor', + }), + buildNode: (image, config) => ({ + ...config, + image: { image_name: image.imageName }, + }), + }, +}; + +const initialControlNet: Omit = { + type: 'controlnet', + model: null, + weight: 1, + beginEndStepPct: [0, 1], + controlMode: 'balanced', + image: null, + processedImage: null, + isProcessingImage: false, + processorConfig: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults(), +}; + +const initialT2IAdapter: Omit = { + type: 't2i_adapter', + model: null, + weight: 1, + beginEndStepPct: [0, 1], + image: null, + processedImage: null, + isProcessingImage: false, + processorConfig: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults(), +}; + +const initialIPAdapter: Omit = { + type: 'ip_adapter', + image: null, + model: null, + beginEndStepPct: [0, 1], + method: 'full', + clipVisionModel: 'ViT-H', + weight: 1, +}; + +export const buildControlNet = (id: string, overrides?: Partial): ControlNetConfig => { + return merge(deepClone(initialControlNet), { id, ...overrides }); +}; + +export const 
buildT2IAdapter = (id: string, overrides?: Partial): T2IAdapterConfig => { + return merge(deepClone(initialT2IAdapter), { id, ...overrides }); +}; + +export const buildIPAdapter = (id: string, overrides?: Partial): IPAdapterConfig => { + return merge(deepClone(initialIPAdapter), { id, ...overrides }); +}; + +export const buildControlAdapterProcessor = ( + modelConfig: ControlNetModelConfig | T2IAdapterModelConfig +): ProcessorConfig | null => { + const defaultPreprocessor = modelConfig.default_settings?.preprocessor; + if (!isProcessorType(defaultPreprocessor)) { + return null; + } + const processorConfig = CONTROLNET_PROCESSORS[defaultPreprocessor].buildDefaults(modelConfig.base); + return processorConfig; +}; + +export const imageDTOToImageWithDims = ({ image_name, width, height }: ImageDTO): ImageWithDims => ({ + imageName: image_name, + width, + height, +}); + +export const t2iAdapterToControlNet = (t2iAdapter: T2IAdapterConfig): ControlNetConfig => { + return { + ...deepClone(t2iAdapter), + type: 'controlnet', + controlMode: initialControlNet.controlMode, + }; +}; + +export const controlNetToT2IAdapter = (controlNet: ControlNetConfig): T2IAdapterConfig => { + return { + ...omit(deepClone(controlNet), 'controlMode'), + type: 't2i_adapter', + }; +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/util/getLayerBlobs.ts b/invokeai/frontend/web/src/features/controlLayers/util/getLayerBlobs.ts index 1b0808c5f1..2ad3e0c90c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/util/getLayerBlobs.ts +++ b/invokeai/frontend/web/src/features/controlLayers/util/getLayerBlobs.ts @@ -1,7 +1,7 @@ import { getStore } from 'app/store/nanostores/store'; import openBase64ImageInTab from 'common/util/openBase64ImageInTab'; import { blobToDataURL } from 'features/canvas/util/blobToDataURL'; -import { isRegionalGuidanceLayer, regional_guidance_layer_NAME } from 'features/controlLayers/store/controlLayersSlice'; +import { isRegionalGuidanceLayer, RG_LAYER_NAME 
} from 'features/controlLayers/store/controlLayersSlice'; import { renderers } from 'features/controlLayers/util/renderers'; import Konva from 'konva'; import { assert } from 'tsafe'; @@ -24,7 +24,7 @@ export const getRegionalPromptLayerBlobs = async ( const stage = new Konva.Stage({ container, width, height }); renderers.renderLayers(stage, reduxLayers, 1, 'brush'); - const konvaLayers = stage.find(`.${regional_guidance_layer_NAME}`); + const konvaLayers = stage.find(`.${RG_LAYER_NAME}`); const blobs: Record = {}; // First remove all layers diff --git a/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts b/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts index b2f04a88c1..85f48baa6e 100644 --- a/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts +++ b/invokeai/frontend/web/src/features/controlLayers/util/renderers.ts @@ -5,20 +5,20 @@ import { $tool, BACKGROUND_LAYER_ID, BACKGROUND_RECT_ID, - CONTROLNET_LAYER_IMAGE_NAME, - CONTROLNET_LAYER_NAME, - getControlNetLayerImageId, + CA_LAYER_IMAGE_NAME, + CA_LAYER_NAME, + getCALayerImageId, getLayerBboxId, - getRegionalGuidanceLayerObjectGroupId, + getRGLayerObjectGroupId, isControlAdapterLayer, isRegionalGuidanceLayer, isRenderableLayer, LAYER_BBOX_NAME, NO_LAYERS_MESSAGE_LAYER_ID, - regional_guidance_layer_LINE_NAME, - regional_guidance_layer_NAME, - regional_guidance_layer_OBJECT_GROUP_NAME, - regional_guidance_layer_RECT_NAME, + RG_LAYER_LINE_NAME, + RG_LAYER_NAME, + RG_LAYER_OBJECT_GROUP_NAME, + RG_LAYER_RECT_NAME, TOOL_PREVIEW_BRUSH_BORDER_INNER_ID, TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID, TOOL_PREVIEW_BRUSH_FILL_ID, @@ -52,11 +52,10 @@ const STAGE_BG_DATAURL = const mapId = (object: { id: string }) => object.id; -const selectRenderableLayers = (n: Konva.Node) => - n.name() === regional_guidance_layer_NAME || n.name() === CONTROLNET_LAYER_NAME; +const selectRenderableLayers = (n: Konva.Node) => n.name() === RG_LAYER_NAME || n.name() === CA_LAYER_NAME; const 
selectVectorMaskObjects = (node: Konva.Node) => { - return node.name() === regional_guidance_layer_LINE_NAME || node.name() === regional_guidance_layer_RECT_NAME; + return node.name() === RG_LAYER_LINE_NAME || node.name() === RG_LAYER_RECT_NAME; }; /** @@ -141,7 +140,7 @@ const renderToolPreview = ( isMouseOver: boolean, brushSize: number ) => { - const layerCount = stage.find(`.${regional_guidance_layer_NAME}`).length; + const layerCount = stage.find(`.${RG_LAYER_NAME}`).length; // Update the stage's pointer style if (layerCount === 0) { // We have no layers, so we should not render any tool @@ -233,7 +232,7 @@ const createRegionalGuidanceLayer = ( // This layer hasn't been added to the konva state yet const konvaLayer = new Konva.Layer({ id: reduxLayer.id, - name: regional_guidance_layer_NAME, + name: RG_LAYER_NAME, draggable: true, dragDistance: 0, }); @@ -265,8 +264,8 @@ const createRegionalGuidanceLayer = ( // The object group holds all of the layer's objects (e.g. lines and rects) const konvaObjectGroup = new Konva.Group({ - id: getRegionalGuidanceLayerObjectGroupId(reduxLayer.id, uuidv4()), - name: regional_guidance_layer_OBJECT_GROUP_NAME, + id: getRGLayerObjectGroupId(reduxLayer.id, uuidv4()), + name: RG_LAYER_OBJECT_GROUP_NAME, listening: false, }); konvaLayer.add(konvaObjectGroup); @@ -285,7 +284,7 @@ const createVectorMaskLine = (reduxObject: VectorMaskLine, konvaGroup: Konva.Gro const vectorMaskLine = new Konva.Line({ id: reduxObject.id, key: reduxObject.id, - name: regional_guidance_layer_LINE_NAME, + name: RG_LAYER_LINE_NAME, strokeWidth: reduxObject.strokeWidth, tension: 0, lineCap: 'round', @@ -307,7 +306,7 @@ const createVectorMaskRect = (reduxObject: VectorMaskRect, konvaGroup: Konva.Gro const vectorMaskRect = new Konva.Rect({ id: reduxObject.id, key: reduxObject.id, - name: regional_guidance_layer_RECT_NAME, + name: RG_LAYER_RECT_NAME, x: reduxObject.x, y: reduxObject.y, width: reduxObject.width, @@ -347,7 +346,7 @@ const 
renderRegionalGuidanceLayer = ( // Convert the color to a string, stripping the alpha - the object group will handle opacity. const rgbColor = rgbColorToString(reduxLayer.previewColor); - const konvaObjectGroup = konvaLayer.findOne(`.${regional_guidance_layer_OBJECT_GROUP_NAME}`); + const konvaObjectGroup = konvaLayer.findOne(`.${RG_LAYER_OBJECT_GROUP_NAME}`); assert(konvaObjectGroup, `Object group not found for layer ${reduxLayer.id}`); // We use caching to handle "global" layer opacity, but caching is expensive and we should only do it when required. @@ -411,7 +410,7 @@ const renderRegionalGuidanceLayer = ( const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer): Konva.Layer => { const konvaLayer = new Konva.Layer({ id: reduxLayer.id, - name: CONTROLNET_LAYER_NAME, + name: CA_LAYER_NAME, imageSmoothingEnabled: true, }); stage.add(konvaLayer); @@ -420,7 +419,7 @@ const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLay const createControlNetLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => { const konvaImage = new Konva.Image({ - name: CONTROLNET_LAYER_IMAGE_NAME, + name: CA_LAYER_IMAGE_NAME, image, }); konvaLayer.add(konvaImage); @@ -432,32 +431,32 @@ const updateControlNetLayerImageSource = async ( konvaLayer: Konva.Layer, reduxLayer: ControlAdapterLayer ) => { - if (reduxLayer.imageName) { - const imageName = reduxLayer.imageName; - const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(reduxLayer.imageName)); + const image = reduxLayer.controlAdapter.processedImage ?? 
reduxLayer.controlAdapter.image; + if (image) { + const { imageName } = image; + const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName)); const imageDTO = await req.unwrap(); req.unsubscribe(); - const image = new Image(); - const imageId = getControlNetLayerImageId(reduxLayer.id, imageName); - image.onload = () => { + const imageEl = new Image(); + const imageId = getCALayerImageId(reduxLayer.id, imageName); + imageEl.onload = () => { // Find the existing image or create a new one - must find using the name, bc the id may have just changed const konvaImage = - konvaLayer.findOne(`.${CONTROLNET_LAYER_IMAGE_NAME}`) ?? - createControlNetLayerImage(konvaLayer, image); + konvaLayer.findOne(`.${CA_LAYER_IMAGE_NAME}`) ?? createControlNetLayerImage(konvaLayer, imageEl); // Update the image's attributes konvaImage.setAttrs({ id: imageId, - image, + image: imageEl, }); updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer); // Must cache after this to apply the filters konvaImage.cache(); - image.id = imageId; + imageEl.id = imageId; }; - image.src = imageDTO.image_url; + imageEl.src = imageDTO.image_url; } else { - konvaLayer.findOne(`.${CONTROLNET_LAYER_IMAGE_NAME}`)?.destroy(); + konvaLayer.findOne(`.${CA_LAYER_IMAGE_NAME}`)?.destroy(); } }; @@ -497,16 +496,14 @@ const updateControlNetLayerImageAttrs = ( const renderControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer) => { const konvaLayer = stage.findOne(`#${reduxLayer.id}`) ?? 
createControlNetLayer(stage, reduxLayer); - const konvaImage = konvaLayer.findOne(`.${CONTROLNET_LAYER_IMAGE_NAME}`); + const konvaImage = konvaLayer.findOne(`.${CA_LAYER_IMAGE_NAME}`); const canvasImageSource = konvaImage?.image(); let imageSourceNeedsUpdate = false; if (canvasImageSource instanceof HTMLImageElement) { - if ( - reduxLayer.imageName && - canvasImageSource.id !== getControlNetLayerImageId(reduxLayer.id, reduxLayer.imageName) - ) { + const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image; + if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.imageName)) { imageSourceNeedsUpdate = true; - } else if (!reduxLayer.imageName) { + } else if (!image) { imageSourceNeedsUpdate = true; } } else if (!canvasImageSource) { diff --git a/invokeai/frontend/web/src/features/dnd/types/index.ts b/invokeai/frontend/web/src/features/dnd/types/index.ts index b2b7820762..7d109473ed 100644 --- a/invokeai/frontend/web/src/features/dnd/types/index.ts +++ b/invokeai/frontend/web/src/features/dnd/types/index.ts @@ -33,6 +33,28 @@ type ControlAdapterDropData = BaseDropData & { }; }; +export type CALayerImageDropData = BaseDropData & { + actionType: 'SET_CA_LAYER_IMAGE'; + context: { + layerId: string; + }; +}; + +export type IPALayerImageDropData = BaseDropData & { + actionType: 'SET_IPA_LAYER_IMAGE'; + context: { + layerId: string; + }; +}; + +export type RGLayerIPAdapterImageDropData = BaseDropData & { + actionType: 'SET_RG_LAYER_IP_ADAPTER_IMAGE'; + context: { + layerId: string; + ipAdapterId: string; + }; +}; + export type CanvasInitialImageDropData = BaseDropData & { actionType: 'SET_CANVAS_INITIAL_IMAGE'; }; @@ -61,7 +83,10 @@ export type TypesafeDroppableData = | CanvasInitialImageDropData | NodesImageDropData | AddToBoardDropData - | RemoveFromBoardDropData; + | RemoveFromBoardDropData + | CALayerImageDropData + | IPALayerImageDropData + | RGLayerIPAdapterImageDropData; type BaseDragData = { id: string; diff 
--git a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts index c2c9de3f0c..c1da111087 100644 --- a/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts +++ b/invokeai/frontend/web/src/features/dnd/util/isValidDrop.ts @@ -19,6 +19,12 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: return payloadType === 'IMAGE_DTO'; case 'SET_CONTROL_ADAPTER_IMAGE': return payloadType === 'IMAGE_DTO'; + case 'SET_CA_LAYER_IMAGE': + return payloadType === 'IMAGE_DTO'; + case 'SET_IPA_LAYER_IMAGE': + return payloadType === 'IMAGE_DTO'; + case 'SET_RG_LAYER_IP_ADAPTER_IMAGE': + return payloadType === 'IMAGE_DTO'; case 'SET_CANVAS_INITIAL_IMAGE': return payloadType === 'IMAGE_DTO'; case 'SET_NODES_IMAGE': diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index e570054258..ca44259995 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -49,6 +49,7 @@ export const zSchedulerField = z.enum([ 'euler_a', 'kdpm_2_a', 'lcm', + 'tcd', ]); export type SchedulerField = z.infer<typeof zSchedulerField>; // #endregion diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/addControlLayersToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/addControlLayersToGraph.ts index a7236af3cc..4581b51ee1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/addControlLayersToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/addControlLayersToGraph.ts @@ -1,9 +1,23 @@ import { getStore } from 'app/store/nanostores/store'; import type { RootState } from 'app/store/store'; -import { selectAllIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice'; -import { isRegionalGuidanceLayer } from 'features/controlLayers/store/controlLayersSlice'; -import { getRegionalPromptLayerBlobs } from
'features/controlLayers/util/getLayerBlobs'; import { + isControlAdapterLayer, + isIPAdapterLayer, + isRegionalGuidanceLayer, +} from 'features/controlLayers/store/controlLayersSlice'; +import { + type ControlNetConfig, + type ImageWithDims, + type IPAdapterConfig, + isControlNetConfig, + isT2IAdapterConfig, + type ProcessorConfig, + type T2IAdapterConfig, +} from 'features/controlLayers/util/controlAdapters'; +import { getRegionalPromptLayerBlobs } from 'features/controlLayers/util/getLayerBlobs'; +import type { ImageField } from 'features/nodes/types/common'; +import { + CONTROL_NET_COLLECT, IP_ADAPTER_COLLECT, NEGATIVE_CONDITIONING, NEGATIVE_CONDITIONING_COLLECT, @@ -14,45 +28,383 @@ import { PROMPT_REGION_NEGATIVE_COND_PREFIX, PROMPT_REGION_POSITIVE_COND_INVERTED_PREFIX, PROMPT_REGION_POSITIVE_COND_PREFIX, + T2I_ADAPTER_COLLECT, } from 'features/nodes/util/graph/constants'; +import { upsertMetadata } from 'features/nodes/util/graph/metadata'; import { size } from 'lodash-es'; import { imagesApi } from 'services/api/endpoints/images'; -import type { CollectInvocation, Edge, IPAdapterInvocation, NonNullableGraph, S } from 'services/api/types'; +import type { + CollectInvocation, + ControlNetInvocation, + CoreMetadataInvocation, + Edge, + IPAdapterInvocation, + NonNullableGraph, + S, + T2IAdapterInvocation, +} from 'services/api/types'; import { assert } from 'tsafe'; -export const addControlLayersToGraph = async (state: RootState, graph: NonNullableGraph, denoiseNodeId: string) => { - if (!state.controlLayers.present.isEnabled) { +const buildControlImage = ( + image: ImageWithDims | null, + processedImage: ImageWithDims | null, + processorConfig: ProcessorConfig | null +): ImageField => { + if (processedImage && processorConfig) { + // We've processed the image in the app - use it for the control image. 
+ return { + image_name: processedImage.imageName, + }; + } else if (image) { + // No processor selected, and we have an image - the user provided a processed image, use it for the control image. + return { + image_name: image.imageName, + }; + } + assert(false, 'Attempted to add unprocessed control image'); +}; + +const buildControlNetMetadata = (controlNet: ControlNetConfig): S['ControlNetMetadataField'] => { + const { beginEndStepPct, controlMode, image, model, processedImage, processorConfig, weight } = controlNet; + + assert(model, 'ControlNet model is required'); + assert(image, 'ControlNet image is required'); + + const processed_image = + processedImage && processorConfig + ? { + image_name: processedImage.imageName, + } + : null; + + return { + control_model: model, + control_weight: weight, + control_mode: controlMode, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + resize_mode: 'just_resize', + image: { + image_name: image.imageName, + }, + processed_image, + }; +}; + +const addControlNetCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => { + if (graph.nodes[CONTROL_NET_COLLECT]) { + // You see, we've already got one! 
return; } + // Add the ControlNet collector + const controlNetIterateNode: CollectInvocation = { + id: CONTROL_NET_COLLECT, + type: 'collect', + is_intermediate: true, + }; + graph.nodes[CONTROL_NET_COLLECT] = controlNetIterateNode; + graph.edges.push({ + source: { node_id: CONTROL_NET_COLLECT, field: 'collection' }, + destination: { + node_id: denoiseNodeId, + field: 'control', + }, + }); +}; + +const addGlobalControlNetsToGraph = async ( + controlNets: ControlNetConfig[], + graph: NonNullableGraph, + denoiseNodeId: string +) => { + if (controlNets.length === 0) { + return; + } + const controlNetMetadata: CoreMetadataInvocation['controlnets'] = []; + addControlNetCollectorSafe(graph, denoiseNodeId); + + for (const controlNet of controlNets) { + if (!controlNet.model) { + return; + } + const { id, beginEndStepPct, controlMode, image, model, processedImage, processorConfig, weight } = controlNet; + + const controlNetNode: ControlNetInvocation = { + id: `control_net_${id}`, + type: 'controlnet', + is_intermediate: true, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + control_mode: controlMode, + resize_mode: 'just_resize', + control_model: model, + control_weight: weight, + image: buildControlImage(image, processedImage, processorConfig), + }; + + graph.nodes[controlNetNode.id] = controlNetNode; + + controlNetMetadata.push(buildControlNetMetadata(controlNet)); + + graph.edges.push({ + source: { node_id: controlNetNode.id, field: 'control' }, + destination: { + node_id: CONTROL_NET_COLLECT, + field: 'item', + }, + }); + } + upsertMetadata(graph, { controlnets: controlNetMetadata }); +}; + +const buildT2IAdapterMetadata = (t2iAdapter: T2IAdapterConfig): S['T2IAdapterMetadataField'] => { + const { beginEndStepPct, image, model, processedImage, processorConfig, weight } = t2iAdapter; + + assert(model, 'T2I Adapter model is required'); + assert(image, 'T2I Adapter image is required'); + + const processed_image = + processedImage && 
processorConfig + ? { + image_name: processedImage.imageName, + } + : null; + + return { + t2i_adapter_model: model, + weight, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + resize_mode: 'just_resize', + image: { + image_name: image.imageName, + }, + processed_image, + }; +}; + +const addT2IAdapterCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => { + if (graph.nodes[T2I_ADAPTER_COLLECT]) { + // You see, we've already got one! + return; + } + // Even though denoise_latents' t2i adapter input is collection or scalar, keep it simple and always use a collect + const t2iAdapterCollectNode: CollectInvocation = { + id: T2I_ADAPTER_COLLECT, + type: 'collect', + is_intermediate: true, + }; + graph.nodes[T2I_ADAPTER_COLLECT] = t2iAdapterCollectNode; + graph.edges.push({ + source: { node_id: T2I_ADAPTER_COLLECT, field: 'collection' }, + destination: { + node_id: denoiseNodeId, + field: 't2i_adapter', + }, + }); +}; + +const addGlobalT2IAdaptersToGraph = async ( + t2iAdapters: T2IAdapterConfig[], + graph: NonNullableGraph, + denoiseNodeId: string +) => { + if (t2iAdapters.length === 0) { + return; + } + const t2iAdapterMetadata: CoreMetadataInvocation['t2iAdapters'] = []; + addT2IAdapterCollectorSafe(graph, denoiseNodeId); + + for (const t2iAdapter of t2iAdapters) { + if (!t2iAdapter.model) { + return; + } + const { id, beginEndStepPct, image, model, processedImage, processorConfig, weight } = t2iAdapter; + + const t2iAdapterNode: T2IAdapterInvocation = { + id: `t2i_adapter_${id}`, + type: 't2i_adapter', + is_intermediate: true, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + resize_mode: 'just_resize', + t2i_adapter_model: model, + weight: weight, + image: buildControlImage(image, processedImage, processorConfig), + }; + + graph.nodes[t2iAdapterNode.id] = t2iAdapterNode; + + t2iAdapterMetadata.push(buildT2IAdapterMetadata(t2iAdapter)); + + graph.edges.push({ + source: { node_id: 
t2iAdapterNode.id, field: 't2i_adapter' }, + destination: { + node_id: T2I_ADAPTER_COLLECT, + field: 'item', + }, + }); + } + + upsertMetadata(graph, { t2iAdapters: t2iAdapterMetadata }); +}; + +const buildIPAdapterMetadata = (ipAdapter: IPAdapterConfig): S['IPAdapterMetadataField'] => { + const { weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter; + + assert(model, 'IP Adapter model is required'); + assert(image, 'IP Adapter image is required'); + + return { + ip_adapter_model: model, + clip_vision_model: clipVisionModel, + weight, + method, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + image: { + image_name: image.imageName, + }, + }; +}; + +const addIPAdapterCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => { + if (graph.nodes[IP_ADAPTER_COLLECT]) { + // You see, we've already got one! + return; + } + + const ipAdapterCollectNode: CollectInvocation = { + id: IP_ADAPTER_COLLECT, + type: 'collect', + is_intermediate: true, + }; + graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode; + graph.edges.push({ + source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' }, + destination: { + node_id: denoiseNodeId, + field: 'ip_adapter', + }, + }); +}; + +const addGlobalIPAdaptersToGraph = async ( + ipAdapters: IPAdapterConfig[], + graph: NonNullableGraph, + denoiseNodeId: string +) => { + if (ipAdapters.length === 0) { + return; + } + const ipAdapterMetdata: CoreMetadataInvocation['ipAdapters'] = []; + addIPAdapterCollectorSafe(graph, denoiseNodeId); + + for (const ipAdapter of ipAdapters) { + const { id, weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter; + assert(image, 'IP Adapter image is required'); + assert(model, 'IP Adapter model is required'); + + const ipAdapterNode: IPAdapterInvocation = { + id: `ip_adapter_${id}`, + type: 'ip_adapter', + is_intermediate: true, + weight, + method, + ip_adapter_model: model, + clip_vision_model: clipVisionModel, + 
begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], + image: { + image_name: image.imageName, + }, + }; + + graph.nodes[ipAdapterNode.id] = ipAdapterNode; + + ipAdapterMetdata.push(buildIPAdapterMetadata(ipAdapter)); + + graph.edges.push({ + source: { node_id: ipAdapterNode.id, field: 'ip_adapter' }, + destination: { + node_id: IP_ADAPTER_COLLECT, + field: 'item', + }, + }); + } + + upsertMetadata(graph, { ipAdapters: ipAdapterMetdata }); +}; + +export const addControlLayersToGraph = async (state: RootState, graph: NonNullableGraph, denoiseNodeId: string) => { const { dispatch } = getStore(); - const isSDXL = state.generation.model?.base === 'sdxl'; - const layers = state.controlLayers.present.layers - // Only support vector mask layers now - // TODO: Image masks + const mainModel = state.generation.model; + assert(mainModel, 'Missing main model when building graph'); + const isSDXL = mainModel.base === 'sdxl'; + + // Add global control adapters + const globalControlNets = state.controlLayers.present.layers + // Must be a CA layer + .filter(isControlAdapterLayer) + // Must be enabled + .filter((l) => l.isEnabled) + // We want the CAs themselves + .map((l) => l.controlAdapter) + // Must be a ControlNet + .filter(isControlNetConfig) + .filter((ca) => { + const hasModel = Boolean(ca.model); + const modelMatchesBase = ca.model?.base === mainModel.base; + const hasControlImage = ca.image || (ca.processedImage && ca.processorConfig); + return hasModel && modelMatchesBase && hasControlImage; + }); + addGlobalControlNetsToGraph(globalControlNets, graph, denoiseNodeId); + + const globalT2IAdapters = state.controlLayers.present.layers + // Must be a CA layer + .filter(isControlAdapterLayer) + // Must be enabled + .filter((l) => l.isEnabled) + // We want the CAs themselves + .map((l) => l.controlAdapter) + // Must be a T2I Adapter + .filter(isT2IAdapterConfig) + .filter((ca) => { + const hasModel = Boolean(ca.model); + const modelMatchesBase =
ca.model?.base === mainModel.base; + const hasControlImage = ca.image || (ca.processedImage && ca.processorConfig); + return hasModel && modelMatchesBase && hasControlImage; + }); + addGlobalT2IAdaptersToGraph(globalT2IAdapters, graph, denoiseNodeId); + + const globalIPAdapters = state.controlLayers.present.layers + // Must be an IP Adapter layer + .filter(isIPAdapterLayer) + // Must be enabled + .filter((l) => l.isEnabled) + // We want the IP Adapters themselves + .map((l) => l.ipAdapter) + .filter((ca) => { + const hasModel = Boolean(ca.model); + const modelMatchesBase = ca.model?.base === mainModel.base; + const hasControlImage = Boolean(ca.image); + return hasModel && modelMatchesBase && hasControlImage; + }); + addGlobalIPAdaptersToGraph(globalIPAdapters, graph, denoiseNodeId); + + const rgLayers = state.controlLayers.present.layers + // Only RG layers get masks .filter(isRegionalGuidanceLayer) // Only visible layers are rendered on the canvas .filter((l) => l.isEnabled) // Only layers with prompts get added to the graph .filter((l) => { const hasTextPrompt = Boolean(l.positivePrompt || l.negativePrompt); - const hasIPAdapter = l.ipAdapterIds.length !== 0; + const hasIPAdapter = l.ipAdapters.length !== 0; return hasTextPrompt || hasIPAdapter; }); - // Collect all IP Adapter ids for IP adapter layers - const layerIPAdapterIds = layers.flatMap((l) => l.ipAdapterIds); - - const regionalIPAdapters = selectAllIPAdapters(state.controlAdapters).filter( - ({ id, model, controlImage, isEnabled }) => { - const hasModel = Boolean(model); - const doesBaseMatch = model?.base === state.generation.model?.base; - const hasControlImage = controlImage; - const isRegional = layerIPAdapterIds.includes(id); - return isEnabled && hasModel && doesBaseMatch && hasControlImage && isRegional; - } - ); - - const layerIds = layers.map((l) => l.id); + const layerIds = rgLayers.map((l) => l.id); const blobs = await getRegionalPromptLayerBlobs(layerIds); assert(size(blobs) ===
size(layerIds), 'Mismatch between layer IDs and blobs'); @@ -118,27 +470,11 @@ export const addControlLayersToGraph = async (state: RootState, graph: NonNullab }, }); - if (!graph.nodes[IP_ADAPTER_COLLECT] && regionalIPAdapters.length > 0) { - const ipAdapterCollectNode: CollectInvocation = { - id: IP_ADAPTER_COLLECT, - type: 'collect', - is_intermediate: true, - }; - graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode; - graph.edges.push({ - source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' }, - destination: { - node_id: denoiseNodeId, - field: 'ip_adapter', - }, - }); - } - // Upload the blobs to the backend, add each to graph // TODO: Store the uploaded image names in redux to reuse them, so long as the layer hasn't otherwise changed. This // would be a great perf win - not only would we skip re-uploading the same image, but we'd be able to use the node // cache (currently, when we re-use the same mask data, since it is a different image, the node cache is not used). - for (const layer of layers) { + for (const layer of rgLayers) { const blob = blobs[layer.id]; assert(blob, `Blob for layer ${layer.id} not found`); @@ -296,36 +632,32 @@ export const addControlLayersToGraph = async (state: RootState, graph: NonNullab } } - for (const ipAdapterId of layer.ipAdapterIds) { - const ipAdapter = selectAllIPAdapters(state.controlAdapters) - .filter(({ id, model, controlImage, isEnabled }) => { - const hasModel = Boolean(model); - const doesBaseMatch = model?.base === state.generation.model?.base; - const hasControlImage = controlImage; - const isRegional = layers.some((l) => l.ipAdapterIds.includes(id)); - return isEnabled && hasModel && doesBaseMatch && hasControlImage && isRegional; - }) - .find((ca) => ca.id === ipAdapterId); + // TODO(psyche): For some reason, I have to explicitly annotate regionalIPAdapters here. Not sure why. 
+ const regionalIPAdapters: IPAdapterConfig[] = layer.ipAdapters.filter((ipAdapter) => { + const hasModel = Boolean(ipAdapter.model); + const modelMatchesBase = ipAdapter.model?.base === mainModel.base; + const hasControlImage = Boolean(ipAdapter.image); + return hasModel && modelMatchesBase && hasControlImage; + }); - if (!ipAdapter?.model) { - return; - } - const { id, weight, model, clipVisionModel, method, beginStepPct, endStepPct, controlImage } = ipAdapter; - - assert(controlImage, 'IP Adapter image is required'); + for (const ipAdapter of regionalIPAdapters) { + addIPAdapterCollectorSafe(graph, denoiseNodeId); + const { id, weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter; + assert(model, 'IP Adapter model is required'); + assert(image, 'IP Adapter image is required'); const ipAdapterNode: IPAdapterInvocation = { id: `ip_adapter_${id}`, type: 'ip_adapter', is_intermediate: true, - weight: weight, - method: method, + weight, + method, ip_adapter_model: model, clip_vision_model: clipVisionModel, - begin_step_percent: beginStepPct, - end_step_percent: endStepPct, + begin_step_percent: beginEndStepPct[0], + end_step_percent: beginEndStepPct[1], image: { - image_name: controlImage, + image_name: image.imageName, }, }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/addControlNetToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/addControlNetToLinearGraph.ts index fb912d0be2..363d97badf 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/addControlNetToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/addControlNetToLinearGraph.ts @@ -1,10 +1,8 @@ import type { RootState } from 'app/store/store'; import { selectValidControlNets } from 'features/controlAdapters/store/controlAdaptersSlice'; import type { ControlAdapterProcessorType, ControlNetConfig } from 'features/controlAdapters/store/types'; -import { isControlAdapterLayer } from 
'features/controlLayers/store/controlLayersSlice'; import type { ImageField } from 'features/nodes/types/common'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { differenceWith, intersectionWith } from 'lodash-es'; import type { CollectInvocation, ControlNetInvocation, @@ -17,9 +15,13 @@ import { assert } from 'tsafe'; import { CONTROL_NET_COLLECT } from './constants'; import { upsertMetadata } from './metadata'; -const getControlNets = (state: RootState) => { - // Start with the valid controlnets - const validControlNets = selectValidControlNets(state.controlAdapters).filter( +export const addControlNetToLinearGraph = async ( + state: RootState, + graph: NonNullableGraph, + baseNodeId: string +): Promise => { + const controlNetMetadata: CoreMetadataInvocation['controlnets'] = []; + const controlNets = selectValidControlNets(state.controlAdapters).filter( ({ model, processedControlImage, processorType, controlImage, isEnabled }) => { const hasModel = Boolean(model); const doesBaseMatch = model?.base === state.generation.model?.base; @@ -29,35 +31,9 @@ const getControlNets = (state: RootState) => { } ); - // txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters - // accordion. We need to filter the list of valid T2I adapters according to the tab. + // The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper. 
const activeTabName = activeTabNameSelector(state); - - if (activeTabName === 'txt2img') { - // Add only the cnets that are used in control layers - // Collect all ControlNet ids for enabled ControlNet layers - const layerControlNetIds = state.controlLayers.present.layers - .filter(isControlAdapterLayer) - .filter((l) => l.isEnabled) - .map((l) => l.controlNetId); - return intersectionWith(validControlNets, layerControlNetIds, (a, b) => a.id === b); - } else { - // Else, we want to exclude the cnets that are used in control layers - // Collect all ControlNet ids for all ControlNet layers - const layerControlNetIds = state.controlLayers.present.layers - .filter(isControlAdapterLayer) - .map((l) => l.controlNetId); - return differenceWith(validControlNets, layerControlNetIds, (a, b) => a.id === b); - } -}; - -export const addControlNetToLinearGraph = async ( - state: RootState, - graph: NonNullableGraph, - baseNodeId: string -): Promise => { - const controlNets = getControlNets(state); - const controlNetMetadata: CoreMetadataInvocation['controlnets'] = []; + assert(activeTabName !== 'txt2img', 'Tried to use addControlNetToLinearGraph on txt2img tab'); if (controlNets.length) { // Even though denoise_latents' control input is collection or scalar, keep it simple and always use a collect diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/addIPAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/addIPAdapterToLinearGraph.ts index 2c53fb3827..12ba4e12a8 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/addIPAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/addIPAdapterToLinearGraph.ts @@ -1,10 +1,8 @@ import type { RootState } from 'app/store/store'; import { selectValidIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice'; import type { IPAdapterConfig } from 'features/controlAdapters/store/types'; -import { isIPAdapterLayer, isRegionalGuidanceLayer } from 
'features/controlLayers/store/controlLayersSlice'; import type { ImageField } from 'features/nodes/types/common'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { differenceWith, intersectionWith } from 'lodash-es'; import type { CollectInvocation, CoreMetadataInvocation, @@ -17,48 +15,21 @@ import { assert } from 'tsafe'; import { IP_ADAPTER_COLLECT } from './constants'; import { upsertMetadata } from './metadata'; -const getIPAdapters = (state: RootState) => { - // Start with the valid IP adapters - const validIPAdapters = selectValidIPAdapters(state.controlAdapters).filter(({ model, controlImage, isEnabled }) => { - const hasModel = Boolean(model); - const doesBaseMatch = model?.base === state.generation.model?.base; - const hasControlImage = controlImage; - return isEnabled && hasModel && doesBaseMatch && hasControlImage; - }); - - // Masked IP adapters are handled in the graph helper for regional control - skip them here - const maskedIPAdapterIds = state.controlLayers.present.layers - .filter(isRegionalGuidanceLayer) - .map((l) => l.ipAdapterIds) - .flat(); - const nonMaskedIPAdapters = differenceWith(validIPAdapters, maskedIPAdapterIds, (a, b) => a.id === b); - - // txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters - // accordion. We need to filter the list of valid IP adapters according to the tab. 
- const activeTabName = activeTabNameSelector(state); - - if (activeTabName === 'txt2img') { - // If we are on the t2i tab, we only want to add the IP adapters that are used in unmasked IP Adapter layers - // Collect all IP Adapter ids for enabled IP adapter layers - const layerIPAdapterIds = state.controlLayers.present.layers - .filter(isIPAdapterLayer) - .filter((l) => l.isEnabled) - .map((l) => l.ipAdapterId); - return intersectionWith(nonMaskedIPAdapters, layerIPAdapterIds, (a, b) => a.id === b); - } else { - // Else, we want to exclude the IP adapters that are used in IP Adapter layers - // Collect all IP Adapter ids for enabled IP adapter layers - const layerIPAdapterIds = state.controlLayers.present.layers.filter(isIPAdapterLayer).map((l) => l.ipAdapterId); - return differenceWith(nonMaskedIPAdapters, layerIPAdapterIds, (a, b) => a.id === b); - } -}; - export const addIPAdapterToLinearGraph = async ( state: RootState, graph: NonNullableGraph, baseNodeId: string ): Promise => { - const ipAdapters = getIPAdapters(state); + // The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper. 
+ const activeTabName = activeTabNameSelector(state); + assert(activeTabName !== 'txt2img', 'Tried to use addIPAdapterToLinearGraph on txt2img tab'); + + const ipAdapters = selectValidIPAdapters(state.controlAdapters).filter(({ model, controlImage, isEnabled }) => { + const hasModel = Boolean(model); + const doesBaseMatch = model?.base === state.generation.model?.base; + const hasControlImage = controlImage; + return isEnabled && hasModel && doesBaseMatch && hasControlImage; + }); if (ipAdapters.length) { // Even though denoise_latents' ip adapter input is collection or scalar, keep it simple and always use a collect diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/addT2IAdapterToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/addT2IAdapterToLinearGraph.ts index 1632449724..ddd87256f4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/addT2IAdapterToLinearGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/addT2IAdapterToLinearGraph.ts @@ -1,10 +1,8 @@ import type { RootState } from 'app/store/store'; import { selectValidT2IAdapters } from 'features/controlAdapters/store/controlAdaptersSlice'; import type { ControlAdapterProcessorType, T2IAdapterConfig } from 'features/controlAdapters/store/types'; -import { isControlAdapterLayer } from 'features/controlLayers/store/controlLayersSlice'; import type { ImageField } from 'features/nodes/types/common'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; -import { differenceWith, intersectionWith } from 'lodash-es'; import type { CollectInvocation, CoreMetadataInvocation, @@ -17,9 +15,16 @@ import { assert } from 'tsafe'; import { T2I_ADAPTER_COLLECT } from './constants'; import { upsertMetadata } from './metadata'; -const getT2IAdapters = (state: RootState) => { - // Start with the valid controlnets - const validT2IAdapters = selectValidT2IAdapters(state.controlAdapters).filter( +export const addT2IAdaptersToLinearGraph = async ( +
state: RootState, + graph: NonNullableGraph, + baseNodeId: string +): Promise => { + // The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper. + const activeTabName = activeTabNameSelector(state); + assert(activeTabName !== 'txt2img', 'Tried to use addT2IAdaptersToLinearGraph on txt2img tab'); + + const t2iAdapters = selectValidT2IAdapters(state.controlAdapters).filter( ({ model, processedControlImage, processorType, controlImage, isEnabled }) => { const hasModel = Boolean(model); const doesBaseMatch = model?.base === state.generation.model?.base; @@ -29,34 +34,6 @@ const getT2IAdapters = (state: RootState) => { } ); - // txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters - // accordion. We need to filter the list of valid T2I adapters according to the tab. - const activeTabName = activeTabNameSelector(state); - - if (activeTabName === 'txt2img') { - // Add only the T2Is that are used in control layers - // Collect all ids for enabled control adapter layers - const layerControlAdapterIds = state.controlLayers.present.layers - .filter(isControlAdapterLayer) - .filter((l) => l.isEnabled) - .map((l) => l.controlNetId); - return intersectionWith(validT2IAdapters, layerControlAdapterIds, (a, b) => a.id === b); - } else { - // Else, we want to exclude the T2Is that are used in control layers - const layerControlAdapterIds = state.controlLayers.present.layers - .filter(isControlAdapterLayer) - .map((l) => l.controlNetId); - return differenceWith(validT2IAdapters, layerControlAdapterIds, (a, b) => a.id === b); - } -}; - -export const addT2IAdaptersToLinearGraph = async ( - state: RootState, - graph: NonNullableGraph, - baseNodeId: string -): Promise => { - const t2iAdapters = getT2IAdapters(state); - if (t2iAdapters.length) { // Even though denoise_latents' t2i adapter input is collection or scalar, keep it simple and always use a collect const 
t2iAdapterCollectNode: CollectInvocation = { diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearSDXLTextToImageGraph.ts index 010fb9c5e4..9134ef9de7 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearSDXLTextToImageGraph.ts @@ -4,13 +4,10 @@ import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetch import { addControlLayersToGraph } from 'features/nodes/util/graph/addControlLayersToGraph'; import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types'; -import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; -import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; -import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -264,14 +261,6 @@ export const buildLinearSDXLTextToImageGraph = async (state: RootState): Promise // add LoRA support await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); - // add controlnet, mutating `graph` - await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); - - // add IP Adapter - await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); - - await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); - await addControlLayersToGraph(state, graph, SDXL_DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph diff --git 
a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts index ea59d7e41d..340a24bca4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts @@ -5,13 +5,10 @@ import { addControlLayersToGraph } from 'features/nodes/util/graph/addControlLay import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils'; import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types'; -import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addHrfToGraph } from './addHrfToGraph'; -import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; -import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -246,14 +243,6 @@ export const buildLinearTextToImageGraph = async (state: RootState): Promise a.label.localeCompare(b.label)); diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/ControlSettingsAccordion/ControlSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/ControlSettingsAccordion/ControlSettingsAccordion.tsx index d072cfde0f..eef997a11b 100644 --- a/invokeai/frontend/web/src/features/settingsAccordions/components/ControlSettingsAccordion/ControlSettingsAccordion.tsx +++ b/invokeai/frontend/web/src/features/settingsAccordions/components/ControlSettingsAccordion/ControlSettingsAccordion.tsx @@ -13,66 +13,52 @@ import { selectValidIPAdapters, 
selectValidT2IAdapters, } from 'features/controlAdapters/store/controlAdaptersSlice'; -import { selectAllControlAdapterIds, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice'; import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle'; import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { Fragment, memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiPlusBold } from 'react-icons/pi'; -const selector = createMemoizedSelector( - [selectControlAdaptersSlice, selectControlLayersSlice], - (controlAdapters, controlLayers) => { - const badges: string[] = []; - let isError = false; +const selector = createMemoizedSelector([selectControlAdaptersSlice], (controlAdapters) => { + const badges: string[] = []; + let isError = false; - const controlLayersAdapterIds = selectAllControlAdapterIds(controlLayers.present); + const enabledNonRegionalIPAdapterCount = selectAllIPAdapters(controlAdapters).filter((ca) => ca.isEnabled).length; - const enabledNonRegionalIPAdapterCount = selectAllIPAdapters(controlAdapters) - .filter((ca) => !controlLayersAdapterIds.includes(ca.id)) - .filter((ca) => ca.isEnabled).length; - - const validIPAdapterCount = selectValidIPAdapters(controlAdapters).length; - if (enabledNonRegionalIPAdapterCount > 0) { - badges.push(`${enabledNonRegionalIPAdapterCount} IP`); - } - if (enabledNonRegionalIPAdapterCount > validIPAdapterCount) { - isError = true; - } - - const enabledControlNetCount = selectAllControlNets(controlAdapters) - .filter((ca) => !controlLayersAdapterIds.includes(ca.id)) - .filter((ca) => ca.isEnabled).length; - const validControlNetCount = selectValidControlNets(controlAdapters).length; - if (enabledControlNetCount > 0) { - badges.push(`${enabledControlNetCount} ControlNet`); - } - if (enabledControlNetCount > validControlNetCount) { - isError = true; - } - - const enabledT2IAdapterCount = 
selectAllT2IAdapters(controlAdapters) - .filter((ca) => !controlLayersAdapterIds.includes(ca.id)) - .filter((ca) => ca.isEnabled).length; - const validT2IAdapterCount = selectValidT2IAdapters(controlAdapters).length; - if (enabledT2IAdapterCount > 0) { - badges.push(`${enabledT2IAdapterCount} T2I`); - } - if (enabledT2IAdapterCount > validT2IAdapterCount) { - isError = true; - } - - const controlAdapterIds = selectControlAdapterIds(controlAdapters).filter( - (id) => !controlLayersAdapterIds.includes(id) - ); - - return { - controlAdapterIds, - badges, - isError, // TODO: Add some visual indicator that the control adapters are in an error state - }; + const validIPAdapterCount = selectValidIPAdapters(controlAdapters).length; + if (enabledNonRegionalIPAdapterCount > 0) { + badges.push(`${enabledNonRegionalIPAdapterCount} IP`); } -); + if (enabledNonRegionalIPAdapterCount > validIPAdapterCount) { + isError = true; + } + + const enabledControlNetCount = selectAllControlNets(controlAdapters).filter((ca) => ca.isEnabled).length; + const validControlNetCount = selectValidControlNets(controlAdapters).length; + if (enabledControlNetCount > 0) { + badges.push(`${enabledControlNetCount} ControlNet`); + } + if (enabledControlNetCount > validControlNetCount) { + isError = true; + } + + const enabledT2IAdapterCount = selectAllT2IAdapters(controlAdapters).filter((ca) => ca.isEnabled).length; + const validT2IAdapterCount = selectValidT2IAdapters(controlAdapters).length; + if (enabledT2IAdapterCount > 0) { + badges.push(`${enabledT2IAdapterCount} T2I`); + } + if (enabledT2IAdapterCount > validT2IAdapterCount) { + isError = true; + } + + const controlAdapterIds = selectControlAdapterIds(controlAdapters); + + return { + controlAdapterIds, + badges, + isError, // TODO: Add some visual indicator that the control adapters are in an error state + }; +}); export const ControlSettingsAccordion: React.FC = memo(() => { const { t } = useTranslation(); diff --git 
a/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeCanvas.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeCanvas.tsx index 878174fe75..1dc8f49b78 100644 --- a/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeCanvas.tsx +++ b/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeCanvas.tsx @@ -16,6 +16,9 @@ export const ImageSizeCanvas = memo(() => { const onChangeWidth = useCallback( (width: number) => { + if (width === 0) { + return; + } dispatch(setBoundingBoxDimensions({ width }, optimalDimension)); }, [dispatch, optimalDimension] @@ -23,6 +26,9 @@ export const ImageSizeCanvas = memo(() => { const onChangeHeight = useCallback( (height: number) => { + if (height === 0) { + return; + } dispatch(setBoundingBoxDimensions({ height }, optimalDimension)); }, [dispatch, optimalDimension] diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeLinear.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeLinear.tsx index 7e436556da..0b28200ca2 100644 --- a/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeLinear.tsx +++ b/invokeai/frontend/web/src/features/settingsAccordions/components/ImageSettingsAccordion/ImageSizeLinear.tsx @@ -18,6 +18,9 @@ export const ImageSizeLinear = memo(() => { const onChangeWidth = useCallback( (width: number) => { + if (width === 0) { + return; + } dispatch(widthChanged({ width })); }, [dispatch] @@ -25,6 +28,9 @@ export const ImageSizeLinear = memo(() => { const onChangeHeight = useCallback( (height: number) => { + if (height === 0) { + return; + } dispatch(heightChanged({ height })); }, [dispatch] diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts 
b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts index 2d04b9dc46..a42df8f600 100644 --- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts +++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts @@ -4,6 +4,7 @@ import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/ import type { AnyModelConfig } from 'services/api/types'; import { isControlNetModelConfig, + isControlNetOrT2IAdapterModelConfig, isIPAdapterModelConfig, isLoRAModelConfig, isNonRefinerMainModelConfig, @@ -35,6 +36,7 @@ export const useNonSDXLMainModels = buildModelsHook(isNonSDXLMainModelConfig); export const useRefinerModels = buildModelsHook(isRefinerMainModelModelConfig); export const useSDXLModels = buildModelsHook(isSDXLMainModelModelConfig); export const useLoRAModels = buildModelsHook(isLoRAModelConfig); +export const useControlNetAndT2IAdapterModels = buildModelsHook(isControlNetOrT2IAdapterModelConfig); export const useControlNetModels = buildModelsHook(isControlNetModelConfig); export const useT2IAdapterModels = buildModelsHook(isT2IAdapterModelConfig); export const useIPAdapterModels = buildModelsHook(isIPAdapterModelConfig); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 727fad6f81..27a3c670da 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2883,6 +2883,11 @@ export type components = { * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE */ image?: components["schemas"]["ImageField"] | null; + /** + * [OPTIONAL] UNet + * @description OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE + */ + unet?: components["schemas"]["UNetField"] | null; /** * [OPTIONAL] VAE * @description OPTIONAL: Only connect for specialized Inpainting models, 
masked_latents will be generated from the image with the VAE @@ -3157,7 +3162,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm"; + scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; /** * UNet * @description UNet (scheduler, LoRAs) @@ -4184,7 +4189,7 @@ export type components = { * @description The nodes in this graph */ nodes: { - [key: string]: components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | 
components["schemas"]["MultiplyInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | 
components["schemas"]["ImageCropInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["VAELoaderInvocation"] | 
components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"]; + [key: string]: components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CV2InfillInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RoundInvocation"] | 
components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | 
components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["InfillColorInvocation"] | 
components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["NoiseInvocation"]; }; /** * Edges @@ -4221,7 +4226,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["String2Output"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["IPAdapterOutput"] | 
components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LoRALoaderOutput"]; + [key: string]: components["schemas"]["ImageOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["String2Output"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | 
components["schemas"]["ConditioningOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["CLIPOutput"]; }; /** * Errors @@ -4325,6 +4330,49 @@ export type components = { */ type: "hed_image_processor"; }; + /** + * Heuristic Resize + * @description Resize an image using a heuristic method. Preserves edge maps. + */ + HeuristicResizeInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** @description The image to resize */ + image?: components["schemas"]["ImageField"]; + /** + * Width + * @description The width to resize to (px) + * @default 512 + */ + width?: number; + /** + * Height + * @description The height to resize to (px) + * @default 512 + */ + height?: number; + /** + * type + * @default heuristic_resize + * @constant + */ + type: "heuristic_resize"; + }; /** * HuggingFaceMetadata * @description Extended metadata fields provided by HuggingFace. @@ -7040,7 +7088,7 @@ export type components = { * Scheduler * @description Default scheduler for this model */ - scheduler?: ("ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm") | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd") | null; /** * Steps * @description Default number of steps for this model @@ -9235,7 +9283,7 @@ export type components = { * @default euler * @enum {string} */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm"; + scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | 
"dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; /** * type * @default scheduler @@ -9250,7 +9298,7 @@ export type components = { * @description Scheduler to use during inference * @enum {string} */ - scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm"; + scheduler: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "lcm" | "tcd"; /** * type * @default scheduler_output diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 7dde5fb624..5b88170d41 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -177,6 +177,22 @@ type ControlAdapterAction = { id: string; }; +export type CALayerImagePostUploadAction = { + type: 'SET_CA_LAYER_IMAGE'; + layerId: string; +}; + +export type IPALayerImagePostUploadAction = { + type: 'SET_IPA_LAYER_IMAGE'; + layerId: string; +}; + +export type RGLayerIPAdapterImagePostUploadAction = { + type: 'SET_RG_LAYER_IP_ADAPTER_IMAGE'; + layerId: string; + ipAdapterId: string; +}; + type InitialImageAction = { type: 'SET_INITIAL_IMAGE'; }; @@ -206,4 +222,7 @@ export type PostUploadAction = | NodesAction | CanvasInitialImageAction | ToastAction - | AddToBatchAction; + | AddToBatchAction + | CALayerImagePostUploadAction + | IPALayerImagePostUploadAction + | RGLayerIPAdapterImagePostUploadAction;