Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit 3b64e7a1fd: Merge branch 'main' into lstein/feat/simple-mm2-api
@@ -167,13 +167,13 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
    title="Canny Processor",
    tags=["controlnet", "canny"],
    category="controlnet",
-    version="1.3.2",
+    version="1.3.3",
)
class CannyImageProcessorInvocation(ImageProcessorInvocation):
    """Canny edge detection for ControlNet"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    low_threshold: int = InputField(
        default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
    )
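The ge=0 to ge=1 changes repeated throughout this file tighten the Pydantic validation on the resolution fields, so a zero resolution is rejected when the node is validated instead of reaching the annotator. A minimal sketch of the constraint's effect, using plain pydantic Field in place of InvokeAI's InputField wrapper (the model name below is made up for illustration):

from pydantic import BaseModel, Field, ValidationError

class CannyArgs(BaseModel):
    # ge=1 means "greater than or equal to 1", so 0 now fails validation
    # instead of producing a degenerate 0x0 detection image downstream.
    detect_resolution: int = Field(default=512, ge=1)
    image_resolution: int = Field(default=512, ge=1)

CannyArgs(detect_resolution=512)  # accepted
try:
    CannyArgs(detect_resolution=0)  # rejected under ge=1 (was accepted under ge=0)
except ValidationError as err:
    print(err)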
@@ -201,13 +201,13 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
    title="HED (softedge) Processor",
    tags=["controlnet", "hed", "softedge"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class HedImageProcessorInvocation(ImageProcessorInvocation):
    """Applies HED edge detection to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    # safe not supported in controlnet_aux v0.0.3
    # safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
    scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
@@ -230,13 +230,13 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
    title="Lineart Processor",
    tags=["controlnet", "lineart"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class LineartImageProcessorInvocation(ImageProcessorInvocation):
    """Applies line art processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    coarse: bool = InputField(default=False, description="Whether to use coarse mode")

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
@@ -252,13 +252,13 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
    title="Lineart Anime Processor",
    tags=["controlnet", "lineart", "anime"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
    """Applies line art anime processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        processor = LineartAnimeProcessor()
@@ -275,15 +275,15 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
    title="Midas Depth Processor",
    tags=["controlnet", "midas"],
    category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
)
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
    """Applies Midas depth processing to image"""

    a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
    bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    # depth_and_normal not supported in controlnet_aux v0.0.3
    # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")

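The a_mult field above documents its own conversion: the annotator receives a = a_mult * PI. A small sketch of that mapping follows; the commented-out call shape is based on controlnet_aux's MidasDetector and is an assumption here, not code taken from this diff:

import numpy as np

a_mult = 2.0          # node default
bg_th = 0.1           # node default
a = a_mult * np.pi    # the value the Midas annotator actually consumes

# Assumed call shape (controlnet_aux), shown only for orientation:
# midas = MidasDetector.from_pretrained("lllyasviel/Annotators")
# depth_map = midas(image, a=a, bg_th=bg_th)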
@@ -307,13 +307,13 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
    title="Normal BAE Processor",
    tags=["controlnet"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
    """Applies NormalBae processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
@@ -324,13 +324,13 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):


@invocation(
-    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2"
+    "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
)
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
    """Applies MLSD processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
    thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")

@@ -347,13 +347,13 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):


@invocation(
-    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2"
+    "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
)
class PidiImageProcessorInvocation(ImageProcessorInvocation):
    """Applies PIDI processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
    scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)

@@ -374,13 +374,13 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
    title="Content Shuffle Processor",
    tags=["controlnet", "contentshuffle"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
    """Applies content shuffle processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
    h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
    w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
    f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
@@ -404,7 +404,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
    title="Zoe (Depth) Processor",
    tags=["controlnet", "zoe", "depth"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
    """Applies Zoe depth processing to image"""
@@ -420,15 +420,15 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
    title="Mediapipe Face Processor",
    tags=["controlnet", "mediapipe", "face"],
    category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
)
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
    """Applies mediapipe face processing to image"""

    max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
    min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        mediapipe_face_processor = MediapipeFaceDetector()
@@ -447,7 +447,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
    title="Leres (Depth) Processor",
    tags=["controlnet", "leres", "depth"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class LeresImageProcessorInvocation(ImageProcessorInvocation):
    """Applies leres processing to image"""
@@ -455,8 +455,8 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
    thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
    thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
    boost: bool = InputField(default=False, description="Whether to use boost mode")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
@@ -476,7 +476,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
    title="Tile Resample Processor",
    tags=["controlnet", "tile"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
    """Tile resampler processor"""
@@ -516,13 +516,13 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
    title="Segment Anything Processor",
    tags=["controlnet", "segmentanything"],
    category="controlnet",
-    version="1.2.3",
+    version="1.2.4",
)
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
    """Applies segment anything processing to image"""

-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
@@ -563,12 +563,12 @@ class SamDetectorReproducibleColors(SamDetector):
    title="Color Map Processor",
    tags=["controlnet"],
    category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
)
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a color map from the provided image"""

-    color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size)
+    color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        np_image = np.array(image, dtype=np.uint8)
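A tile size of zero would make any tile-based color map degenerate, which is what the new ge=1 bound rules out. The snippet below is a self-contained sketch of one common way such a color map is computed (average each tile by downscaling, then upscale with nearest-neighbor); it illustrates the idea and is not the exact InvokeAI implementation:

from PIL import Image

def color_map(image: Image.Image, tile_size: int = 64) -> Image.Image:
    # tile_size must be >= 1, matching the new ge=1 constraint; 0 would divide by zero.
    w, h = image.size
    tiles_x = max(1, w // tile_size)
    tiles_y = max(1, h // tile_size)
    # Downscaling averages each tile's color; upscaling with NEAREST paints the grid back.
    small = image.resize((tiles_x, tiles_y), Image.Resampling.BILINEAR)
    return small.resize((w, h), Image.Resampling.NEAREST)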
@@ -595,7 +595,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
    title="Depth Anything Processor",
    tags=["controlnet", "depth", "depth anything"],
    category="controlnet",
-    version="1.1.1",
+    version="1.1.2",
)
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a depth map based on the Depth Anything algorithm"""
@@ -603,7 +603,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
        default="small", description="The size of the depth model to use"
    )
-    resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
+    resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        def loader(model_path: Path):
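This one is a loosening rather than a tightening: the old field only accepted multiples of 64 no smaller than 64, while the new field accepts any positive integer. A minimal Pydantic comparison, again using plain Field in place of InputField and invented model names:

from pydantic import BaseModel, Field

class OldArgs(BaseModel):
    resolution: int = Field(default=512, ge=64, multiple_of=64)

class NewArgs(BaseModel):
    resolution: int = Field(default=512, ge=1)

NewArgs(resolution=600)    # accepted after this change
# OldArgs(resolution=600)  # would raise: 600 is not a multiple of 64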
@@ -622,7 +622,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    title="DW Openpose Image Processor",
    tags=["controlnet", "dwpose", "openpose"],
    category="controlnet",
-    version="1.1.0",
+    version="1.1.1",
)
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
    """Generates an openpose pose from an image using DWPose"""
@@ -630,7 +630,7 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
    draw_body: bool = InputField(default=True)
    draw_face: bool = InputField(default=False)
    draw_hands: bool = InputField(default=False)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image, context: InvocationContext) -> Image.Image:
        dw_openpose = DWOpenposeDetector(context)
@@ -649,15 +649,15 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
    title="Heuristic Resize",
    tags=["image, controlnet"],
    category="image",
-    version="1.0.0",
+    version="1.0.1",
    classification=Classification.Prototype,
)
class HeuristicResizeInvocation(BaseInvocation):
    """Resize an image using a heuristic method. Preserves edge maps."""

    image: ImageField = InputField(description="The image to resize")
-    width: int = InputField(default=512, gt=0, description="The width to resize to (px)")
-    height: int = InputField(default=512, gt=0, description="The height to resize to (px)")
+    width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
+    height: int = InputField(default=512, ge=1, description="The height to resize to (px)")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, "RGB")
@@ -3,7 +3,7 @@ import inspect
import math
from contextlib import ExitStack
from functools import singledispatchmethod
-from typing import Any, Iterator, List, Literal, Optional, Tuple, Union
+from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union

import einops
import numpy as np
@@ -11,7 +11,6 @@ import numpy.typing as npt
import torch
import torchvision
import torchvision.transforms as T
from diffusers import AutoencoderKL, AutoencoderTiny
from diffusers.configuration_utils import ConfigMixin
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import T2IAdapter
@@ -21,9 +20,12 @@ from diffusers.models.attention_processor import (
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers import DPMSolverSDEScheduler
from diffusers.schedulers import SchedulerMixin as Scheduler
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from diffusers.schedulers.scheduling_tcd import TCDScheduler
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
from PIL import Image, ImageFilter
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
@@ -521,9 +523,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
        )

        if is_sdxl:
-            return SDXLConditioningInfo(
-                embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids
-            ), regions
+            return (
+                SDXLConditioningInfo(embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids),
+                regions,
+            )
        return BasicConditioningInfo(embeds=text_embedding), regions

    def get_conditioning_data(
@@ -825,7 +828,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
        denoising_start: float,
        denoising_end: float,
        seed: int,
-    ) -> Tuple[int, List[int], int]:
+    ) -> Tuple[int, List[int], int, Dict[str, Any]]:
        assert isinstance(scheduler, ConfigMixin)
        if scheduler.config.get("cpu_only", False):
            scheduler.set_timesteps(steps, device="cpu")
@@ -853,13 +856,15 @@ class DenoiseLatentsInvocation(BaseInvocation):
        timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
        num_inference_steps = len(timesteps) // scheduler.order

-        scheduler_step_kwargs = {}
+        scheduler_step_kwargs: Dict[str, Any] = {}
        scheduler_step_signature = inspect.signature(scheduler.step)
        if "generator" in scheduler_step_signature.parameters:
            # At some point, someone decided that schedulers that accept a generator should use the original seed with
            # all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
            # reproducibility.
-            scheduler_step_kwargs = {"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)}
+            scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
+        if isinstance(scheduler, TCDScheduler):
+            scheduler_step_kwargs.update({"eta": 1.0})

        return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs
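With scheduler_step_kwargs typed as a plain dict, the method can now accumulate more than one optional argument: a generator for schedulers whose step() accepts one, and eta=1.0 when the scheduler is a TCDScheduler. The following self-contained sketch reproduces the signature-inspection part of that pattern with a stand-in scheduler class instead of a real diffusers scheduler:

import inspect
from typing import Any, Dict

import torch

class FakeScheduler:
    def step(self, model_output, timestep, sample, generator=None):
        pass  # stand-in for a diffusers scheduler's step()

def build_step_kwargs(scheduler, seed: int, device: str = "cpu") -> Dict[str, Any]:
    kwargs: Dict[str, Any] = {}
    # Only pass a generator if the scheduler's step() actually accepts one.
    if "generator" in inspect.signature(scheduler.step).parameters:
        # Same convention as the diff: the seed with all 32 bits flipped, kept for reproducibility.
        kwargs["generator"] = torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)
    return kwargs

print(build_step_kwargs(FakeScheduler(), seed=123))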
@@ -13,6 +13,7 @@ from diffusers import (
    LCMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
+    TCDScheduler,
    UniPCMultistepScheduler,
)

@@ -40,4 +41,5 @@ SCHEDULER_MAP = {
    "dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
    "unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
    "lcm": (LCMScheduler, {}),
+    "tcd": (TCDScheduler, {}),
}
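SCHEDULER_MAP pairs each scheduler name with its class and the extra settings it needs, so supporting TCD is a single new entry. A rough sketch of how such a map is typically consumed; make_scheduler is a hypothetical helper for illustration, not InvokeAI's actual factory:

from diffusers import DDIMScheduler, LCMScheduler, TCDScheduler

SCHEDULER_MAP = {
    "ddim": (DDIMScheduler, {}),
    "lcm": (LCMScheduler, {}),
    "tcd": (TCDScheduler, {}),  # the entry added in this commit
}

def make_scheduler(name: str):
    # Look up the registered class and its extra kwargs, then construct it.
    # In the real map some entries carry pipeline-level flags (e.g. "cpu_only")
    # that are handled by the pipeline rather than passed to the constructor.
    scheduler_class, extra_kwargs = SCHEDULER_MAP[name]
    return scheduler_class(**extra_kwargs)

scheduler = make_scheduler("tcd")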
@@ -25,7 +25,7 @@
    "typegen": "node scripts/typegen.js",
    "preview": "vite preview",
    "lint:knip": "knip",
-    "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:0 src/main.tsx",
+    "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
    "lint:eslint": "eslint --max-warnings=0 .",
    "lint:prettier": "prettier --check .",
    "lint:tsc": "tsc --noEmit",
@@ -225,7 +225,7 @@
    "composition": "Composition Only",
    "safe": "Safe",
    "saveControlImage": "Save Control Image",
-    "scribble": "scribble",
+    "scribble": "Scribble",
    "selectModel": "Select a model",
    "selectCLIPVisionModel": "Select a CLIP Vision model",
    "setControlImageDimensions": "Copy size to W/H (optimize for model)",
@@ -917,6 +917,7 @@
    "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
    "missingNodeTemplate": "Missing node template",
    "noControlImageForControlAdapter": "Control Adapter #{{number}} has no control image",
+    "imageNotProcessedForControlAdapter": "Control Adapter #{{number}}'s image is not processed",
    "noInitialImageSelected": "No initial image selected",
    "noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
    "incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
@@ -1542,6 +1543,8 @@
    "globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
    "globalIPAdapter": "Global $t(common.ipAdapter)",
    "globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)",
-    "opacityFilter": "Opacity Filter"
+    "opacityFilter": "Opacity Filter",
+    "clearProcessor": "Clear Processor",
+    "resetProcessor": "Reset Processor to Defaults"
  }
}
@@ -16,7 +16,7 @@ import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listen
import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet';
import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged';
import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery';
-import { addControlLayersToControlAdapterBridge } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
+import { addControlAdapterPreprocessor } from 'app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor';
import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess';
import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed';
import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas';
@@ -158,5 +158,4 @@ addUpscaleRequestedListener(startAppListening);
addDynamicPromptsListener(startAppListening);

addSetDefaultSettingsListener(startAppListening);

-addControlLayersToControlAdapterBridge(startAppListening);
+addControlAdapterPreprocessor(startAppListening);
@@ -0,0 +1,156 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import {
  caLayerImageChanged,
  caLayerIsProcessingImageChanged,
  caLayerModelChanged,
  caLayerProcessedImageChanged,
  caLayerProcessorConfigChanged,
  isControlAdapterLayer,
} from 'features/controlLayers/store/controlLayersSlice';
import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { isImageOutput } from 'features/nodes/types/common';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { isEqual } from 'lodash-es';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';

const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged);

const DEBOUNCE_MS = 300;
const log = logger('session');

export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher,
    effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take }) => {
      const { layerId } = action.payload;
      const precheckLayerOriginal = getOriginalState()
        .controlLayers.present.layers.filter(isControlAdapterLayer)
        .find((l) => l.id === layerId);
      const precheckLayer = getState()
        .controlLayers.present.layers.filter(isControlAdapterLayer)
        .find((l) => l.id === layerId);

      // Conditions to bail
      const layerDoesNotExist = !precheckLayer;
      const layerHasNoImage = !precheckLayer?.controlAdapter.image;
      const layerHasNoProcessorConfig = !precheckLayer?.controlAdapter.processorConfig;
      const layerIsAlreadyProcessingImage = precheckLayer?.controlAdapter.isProcessingImage;
      const areImageAndProcessorUnchanged =
        isEqual(precheckLayer?.controlAdapter.image, precheckLayerOriginal?.controlAdapter.image) &&
        isEqual(precheckLayer?.controlAdapter.processorConfig, precheckLayerOriginal?.controlAdapter.processorConfig);

      if (
        layerDoesNotExist ||
        layerHasNoImage ||
        layerHasNoProcessorConfig ||
        areImageAndProcessorUnchanged ||
        layerIsAlreadyProcessingImage
      ) {
        return;
      }

      // Cancel any in-progress instances of this listener
      cancelActiveListeners();
      log.trace('Control Layer CA auto-process triggered');

      // Delay before starting actual work
      await delay(DEBOUNCE_MS);
      dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: true }));

      // Double-check that we are still eligible for processing
      const state = getState();
      const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
      const image = layer?.controlAdapter.image;
      const config = layer?.controlAdapter.processorConfig;

      // If we have no image or there is no processor config, bail
      if (!layer || !image || !config) {
        return;
      }

      // @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error...
      const processorNode = CONTROLNET_PROCESSORS[config.type].buildNode(image, config);
      const enqueueBatchArg: BatchConfig = {
        prepend: true,
        batch: {
          graph: {
            nodes: {
              [processorNode.id]: { ...processorNode, is_intermediate: true },
            },
            edges: [],
          },
          runs: 1,
        },
      };

      try {
        const req = dispatch(
          queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
            fixedCacheKey: 'enqueueBatch',
          })
        );
        const enqueueResult = await req.unwrap();
        req.reset();
        log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));

        const [invocationCompleteAction] = await take(
          (action): action is ReturnType<typeof socketInvocationComplete> =>
            socketInvocationComplete.match(action) &&
            action.payload.data.queue_batch_id === enqueueResult.batch.batch_id &&
            action.payload.data.source_node_id === processorNode.id
        );

        // We still have to check the output type
        if (isImageOutput(invocationCompleteAction.payload.data.result)) {
          const { image_name } = invocationCompleteAction.payload.data.result.image;

          // Wait for the ImageDTO to be received
          const [{ payload }] = await take(
            (action) =>
              imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
          );

          const imageDTO = payload as ImageDTO;

          log.debug({ layerId, imageDTO }, 'ControlNet image processed');

          // Update the processed image in the store
          dispatch(
            caLayerProcessedImageChanged({
              layerId,
              imageDTO,
            })
          );
          dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
        }
      } catch (error) {
        console.log(error);
        log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
        dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));

        if (error instanceof Object) {
          if ('data' in error && 'status' in error) {
            if (error.status === 403) {
              dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
              return;
            }
          }
        }

        dispatch(
          addToast({
            title: t('queue.graphFailedToQueue'),
            status: 'error',
          })
        );
      }
    },
  });
};
@@ -1,144 +0,0 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
import { controlAdapterAdded, controlAdapterRemoved } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlNetConfig, IPAdapterConfig } from 'features/controlAdapters/store/types';
import { isControlAdapterProcessorType } from 'features/controlAdapters/store/types';
import {
  controlAdapterLayerAdded,
  ipAdapterLayerAdded,
  layerDeleted,
  maskLayerIPAdapterAdded,
  maskLayerIPAdapterDeleted,
  regionalGuidanceLayerAdded,
} from 'features/controlLayers/store/controlLayersSlice';
import type { Layer } from 'features/controlLayers/store/types';
import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models';
import { isControlNetModelConfig, isIPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { v4 as uuidv4 } from 'uuid';

export const guidanceLayerAdded = createAction<Layer['type']>('controlLayers/guidanceLayerAdded');
export const guidanceLayerDeleted = createAction<string>('controlLayers/guidanceLayerDeleted');
export const allLayersDeleted = createAction('controlLayers/allLayersDeleted');
export const guidanceLayerIPAdapterAdded = createAction<string>('controlLayers/guidanceLayerIPAdapterAdded');
export const guidanceLayerIPAdapterDeleted = createAction<{ layerId: string; ipAdapterId: string }>(
  'controlLayers/guidanceLayerIPAdapterDeleted'
);

export const addControlLayersToControlAdapterBridge = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: guidanceLayerAdded,
    effect: (action, { dispatch, getState }) => {
      const type = action.payload;
      const layerId = uuidv4();
      if (type === 'regional_guidance_layer') {
        dispatch(regionalGuidanceLayerAdded({ layerId }));
        return;
      }

      const state = getState();
      const baseModel = state.generation.model?.base;
      const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data;

      if (type === 'ip_adapter_layer') {
        const ipAdapterId = uuidv4();
        const overrides: Partial<IPAdapterConfig> = {
          id: ipAdapterId,
        };

        // Find and select the first matching model
        if (modelConfigs) {
          const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig);
          overrides.model = models.find((m) => m.base === baseModel) ?? null;
        }
        dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides }));
        dispatch(ipAdapterLayerAdded({ layerId, ipAdapterId }));
        return;
      }

      if (type === 'control_adapter_layer') {
        const controlNetId = uuidv4();
        const overrides: Partial<ControlNetConfig> = {
          id: controlNetId,
        };

        // Find and select the first matching model
        if (modelConfigs) {
          const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isControlNetModelConfig);
          const model = models.find((m) => m.base === baseModel) ?? null;
          overrides.model = model;
          const defaultPreprocessor = model?.default_settings?.preprocessor;
          overrides.processorType = isControlAdapterProcessorType(defaultPreprocessor) ? defaultPreprocessor : 'none';
          overrides.processorNode = CONTROLNET_PROCESSORS[overrides.processorType].buildDefaults(baseModel);
        }
        dispatch(controlAdapterAdded({ type: 'controlnet', overrides }));
        dispatch(controlAdapterLayerAdded({ layerId, controlNetId }));
        return;
      }
    },
  });

  startAppListening({
    actionCreator: guidanceLayerDeleted,
    effect: (action, { getState, dispatch }) => {
      const layerId = action.payload;
      const state = getState();
      const layer = state.controlLayers.present.layers.find((l) => l.id === layerId);
      assert(layer, `Layer ${layerId} not found`);

      if (layer.type === 'ip_adapter_layer') {
        dispatch(controlAdapterRemoved({ id: layer.ipAdapterId }));
      } else if (layer.type === 'control_adapter_layer') {
        dispatch(controlAdapterRemoved({ id: layer.controlNetId }));
      } else if (layer.type === 'regional_guidance_layer') {
        for (const ipAdapterId of layer.ipAdapterIds) {
          dispatch(controlAdapterRemoved({ id: ipAdapterId }));
        }
      }
      dispatch(layerDeleted(layerId));
    },
  });

  startAppListening({
    actionCreator: allLayersDeleted,
    effect: (action, { dispatch, getOriginalState }) => {
      const state = getOriginalState();
      for (const layer of state.controlLayers.present.layers) {
        dispatch(guidanceLayerDeleted(layer.id));
      }
    },
  });

  startAppListening({
    actionCreator: guidanceLayerIPAdapterAdded,
    effect: (action, { dispatch, getState }) => {
      const layerId = action.payload;
      const ipAdapterId = uuidv4();
      const overrides: Partial<IPAdapterConfig> = {
        id: ipAdapterId,
      };

      // Find and select the first matching model
      const state = getState();
      const baseModel = state.generation.model?.base;
      const modelConfigs = modelsApi.endpoints.getModelConfigs.select(undefined)(state).data;
      if (modelConfigs) {
        const models = modelConfigsAdapterSelectors.selectAll(modelConfigs).filter(isIPAdapterModelConfig);
        overrides.model = models.find((m) => m.base === baseModel) ?? null;
      }

      dispatch(controlAdapterAdded({ type: 'ip_adapter', overrides }));
      dispatch(maskLayerIPAdapterAdded({ layerId, ipAdapterId }));
    },
  });

  startAppListening({
    actionCreator: guidanceLayerIPAdapterDeleted,
    effect: (action, { dispatch }) => {
      const { layerId, ipAdapterId } = action.payload;
      dispatch(controlAdapterRemoved({ id: ipAdapterId }));
      dispatch(maskLayerIPAdapterDeleted({ layerId, ipAdapterId }));
    },
  });
};
@@ -7,6 +7,11 @@ import {
  controlAdapterImageChanged,
  controlAdapterIsEnabledChanged,
} from 'features/controlAdapters/store/controlAdaptersSlice';
+import {
+  caLayerImageChanged,
+  ipaLayerImageChanged,
+  rgLayerIPAdapterImageChanged,
+} from 'features/controlLayers/store/controlLayersSlice';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
@@ -83,6 +88,61 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
      return;
    }

    /**
     * Image dropped on Control Adapter Layer
     */
    if (
      overData.actionType === 'SET_CA_LAYER_IMAGE' &&
      activeData.payloadType === 'IMAGE_DTO' &&
      activeData.payload.imageDTO
    ) {
      const { layerId } = overData.context;
      dispatch(
        caLayerImageChanged({
          layerId,
          imageDTO: activeData.payload.imageDTO,
        })
      );
      return;
    }

    /**
     * Image dropped on IP Adapter Layer
     */
    if (
      overData.actionType === 'SET_IPA_LAYER_IMAGE' &&
      activeData.payloadType === 'IMAGE_DTO' &&
      activeData.payload.imageDTO
    ) {
      const { layerId } = overData.context;
      dispatch(
        ipaLayerImageChanged({
          layerId,
          imageDTO: activeData.payload.imageDTO,
        })
      );
      return;
    }

    /**
     * Image dropped on RG Layer IP Adapter
     */
    if (
      overData.actionType === 'SET_RG_LAYER_IP_ADAPTER_IMAGE' &&
      activeData.payloadType === 'IMAGE_DTO' &&
      activeData.payload.imageDTO
    ) {
      const { layerId, ipAdapterId } = overData.context;
      dispatch(
        rgLayerIPAdapterImageChanged({
          layerId,
          ipAdapterId,
          imageDTO: activeData.payload.imageDTO,
        })
      );
      return;
    }

    /**
     * Image dropped on Canvas
     */
@@ -6,6 +6,11 @@ import {
  controlAdapterImageChanged,
  controlAdapterIsEnabledChanged,
} from 'features/controlAdapters/store/controlAdaptersSlice';
+import {
+  caLayerImageChanged,
+  ipaLayerImageChanged,
+  rgLayerIPAdapterImageChanged,
+} from 'features/controlLayers/store/controlLayersSlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { addToast } from 'features/system/store/systemSlice';
@@ -108,6 +113,39 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
      return;
    }

    if (postUploadAction?.type === 'SET_CA_LAYER_IMAGE') {
      const { layerId } = postUploadAction;
      dispatch(caLayerImageChanged({ layerId, imageDTO }));
      dispatch(
        addToast({
          ...DEFAULT_UPLOADED_TOAST,
          description: t('toast.setControlImage'),
        })
      );
    }

    if (postUploadAction?.type === 'SET_IPA_LAYER_IMAGE') {
      const { layerId } = postUploadAction;
      dispatch(ipaLayerImageChanged({ layerId, imageDTO }));
      dispatch(
        addToast({
          ...DEFAULT_UPLOADED_TOAST,
          description: t('toast.setControlImage'),
        })
      );
    }

    if (postUploadAction?.type === 'SET_RG_LAYER_IP_ADAPTER_IMAGE') {
      const { layerId, ipAdapterId } = postUploadAction;
      dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO }));
      dispatch(
        addToast({
          ...DEFAULT_UPLOADED_TOAST,
          description: t('toast.setControlImage'),
        })
      );
    }

    if (postUploadAction?.type === 'SET_INITIAL_IMAGE') {
      dispatch(initialImageChanged(imageDTO));
      dispatch(
@@ -16,6 +16,7 @@ import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import i18n from 'i18next';
import { forEach } from 'lodash-es';
import { getConnectedEdges } from 'reactflow';
+import { assert } from 'tsafe';

const selector = createMemoizedSelector(
  [
@@ -97,71 +98,93 @@
      reasons.push(i18n.t('parameters.invoke.noModelSelected'));
    }

    let enabledControlAdapters = selectControlAdapterAll(controlAdapters).filter((ca) => ca.isEnabled);

    if (activeTabName === 'txt2img') {
      // Special handling for control layers on txt2img
      const enabledControlLayersAdapterIds = controlLayers.present.layers
      // Handling for Control Layers - only exists on txt2img tab now
      controlLayers.present.layers
        .filter((l) => l.isEnabled)
        .flatMap((layer) => {
          if (layer.type === 'regional_guidance_layer') {
            return layer.ipAdapterIds;
        .flatMap((l) => {
          if (l.type === 'control_adapter_layer') {
            return l.controlAdapter;
          } else if (l.type === 'ip_adapter_layer') {
            return l.ipAdapter;
          } else if (l.type === 'regional_guidance_layer') {
            return l.ipAdapters;
          }
          if (layer.type === 'control_adapter_layer') {
            return [layer.controlNetId];
          assert(false);
        })
        .forEach((ca, i) => {
          const hasNoModel = !ca.model;
          const mismatchedModelBase = ca.model?.base !== model?.base;
          const hasNoImage = !ca.image;
          const imageNotProcessed =
            (ca.type === 'controlnet' || ca.type === 't2i_adapter') && !ca.processedImage && ca.processorConfig;

          if (hasNoModel) {
            reasons.push(
              i18n.t('parameters.invoke.noModelForControlAdapter', {
                number: i + 1,
              })
            );
          }
          if (layer.type === 'ip_adapter_layer') {
            return [layer.ipAdapterId];
          if (mismatchedModelBase) {
            // This should never happen, just a sanity check
            reasons.push(
              i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
                number: i + 1,
              })
            );
          }
          if (hasNoImage) {
            reasons.push(
              i18n.t('parameters.invoke.noControlImageForControlAdapter', {
                number: i + 1,
              })
            );
          }
          if (imageNotProcessed) {
            reasons.push(
              i18n.t('parameters.invoke.imageNotProcessedForControlAdapter', {
                number: i + 1,
              })
            );
          }
        });

      enabledControlAdapters = enabledControlAdapters.filter((ca) => enabledControlLayersAdapterIds.includes(ca.id));
    } else {
      const allControlLayerAdapterIds = controlLayers.present.layers.flatMap((layer) => {
        if (layer.type === 'regional_guidance_layer') {
          return layer.ipAdapterIds;
        }
        if (layer.type === 'control_adapter_layer') {
          return [layer.controlNetId];
        }
        if (layer.type === 'ip_adapter_layer') {
          return [layer.ipAdapterId];
        }
      });
      enabledControlAdapters = enabledControlAdapters.filter((ca) => !allControlLayerAdapterIds.includes(ca.id));
      // Handling for all other tabs
      selectControlAdapterAll(controlAdapters)
        .filter((ca) => ca.isEnabled)
        .forEach((ca, i) => {
          if (!ca.isEnabled) {
            return;
          }

          if (!ca.model) {
            reasons.push(
              i18n.t('parameters.invoke.noModelForControlAdapter', {
                number: i + 1,
              })
            );
          } else if (ca.model.base !== model?.base) {
            // This should never happen, just a sanity check
            reasons.push(
              i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
                number: i + 1,
              })
            );
          }

          if (
            !ca.controlImage ||
            (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
          ) {
            reasons.push(
              i18n.t('parameters.invoke.noControlImageForControlAdapter', {
                number: i + 1,
              })
            );
          }
        });
    }

    enabledControlAdapters.forEach((ca, i) => {
      if (!ca.isEnabled) {
        return;
      }

      if (!ca.model) {
        reasons.push(
          i18n.t('parameters.invoke.noModelForControlAdapter', {
            number: i + 1,
          })
        );
      } else if (ca.model.base !== model?.base) {
        // This should never happen, just a sanity check
        reasons.push(
          i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
            number: i + 1,
          })
        );
      }

      if (
        !ca.controlImage ||
        (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
      ) {
        reasons.push(
          i18n.t('parameters.invoke.noControlImageForControlAdapter', {
            number: i + 1,
          })
        );
      }
    });
  }

  return { isReady: !reasons.length, reasons };
@@ -8,6 +8,7 @@ import calculateScale from 'features/canvas/util/calculateScale';
import { STAGE_PADDING_PERCENTAGE } from 'features/canvas/util/constants';
import floorCoordinates from 'features/canvas/util/floorCoordinates';
import getScaledBoundingBoxDimensions from 'features/canvas/util/getScaledBoundingBoxDimensions';
+import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
import { initialAspectRatioState } from 'features/parameters/components/ImageSize/constants';
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
import { modelChanged } from 'features/parameters/store/generationSlice';
@@ -588,8 +589,9 @@
  },
  extraReducers: (builder) => {
    builder.addCase(modelChanged, (state, action) => {
-      if (action.meta.previousModel?.base === action.payload?.base) {
-        // The base model hasn't changed, we don't need to optimize the size
+      const newModel = action.payload;
+      if (!newModel || action.meta.previousModel?.base === newModel.base) {
+        // Model was cleared or the base didn't change
        return;
      }
      const optimalDimension = getOptimalDimension(action.payload);
@@ -597,14 +599,8 @@
      if (getIsSizeOptimal(width, height, optimalDimension)) {
        return;
      }
-      setBoundingBoxDimensionsReducer(
-        state,
-        {
-          width,
-          height,
-        },
-        optimalDimension
-      );
+      const newSize = calculateNewSize(state.aspectRatio.value, optimalDimension * optimalDimension);
+      setBoundingBoxDimensionsReducer(state, newSize, optimalDimension);
    });

    builder.addCase(socketQueueItemStatusChanged, (state, action) => {
@@ -1,5 +1,5 @@
import type { PayloadAction, Update } from '@reduxjs/toolkit';
-import { createEntityAdapter, createSlice, isAnyOf } from '@reduxjs/toolkit';
+import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
import { getSelectorsOptions } from 'app/store/createMemoizedSelector';
import type { PersistConfig, RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
@@ -481,8 +481,6 @@
  t2iAdaptersReset,
} = controlAdaptersSlice.actions;

-export const isAnyControlAdapterAdded = isAnyOf(controlAdapterAdded, controlAdapterRecalled);

export const selectControlAdaptersSlice = (state: RootState) => state.controlAdapters;

/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
@@ -1,6 +1,7 @@
import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
-import { guidanceLayerAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
import { useAppDispatch } from 'app/store/storeHooks';
+import { useAddCALayer, useAddIPALayer } from 'features/controlLayers/hooks/addLayerHooks';
+import { rgLayerAdded } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -8,14 +9,10 @@ import { PiPlusBold } from 'react-icons/pi';
export const AddLayerButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
-  const addRegionalGuidanceLayer = useCallback(() => {
-    dispatch(guidanceLayerAdded('regional_guidance_layer'));
-  }, [dispatch]);
-  const addControlAdapterLayer = useCallback(() => {
-    dispatch(guidanceLayerAdded('control_adapter_layer'));
-  }, [dispatch]);
-  const addIPAdapterLayer = useCallback(() => {
-    dispatch(guidanceLayerAdded('ip_adapter_layer'));
+  const [addCALayer, isAddCALayerDisabled] = useAddCALayer();
+  const [addIPALayer, isAddIPALayerDisabled] = useAddIPALayer();
+  const addRGLayer = useCallback(() => {
+    dispatch(rgLayerAdded());
  }, [dispatch]);

  return (
@@ -24,13 +21,13 @@ export const AddLayerButton = memo(() => {
        {t('controlLayers.addLayer')}
      </MenuButton>
      <MenuList>
-        <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidanceLayer}>
+        <MenuItem icon={<PiPlusBold />} onClick={addRGLayer}>
          {t('controlLayers.regionalGuidanceLayer')}
        </MenuItem>
-        <MenuItem icon={<PiPlusBold />} onClick={addControlAdapterLayer}>
+        <MenuItem icon={<PiPlusBold />} onClick={addCALayer} isDisabled={isAddCALayerDisabled}>
          {t('controlLayers.globalControlAdapterLayer')}
        </MenuItem>
-        <MenuItem icon={<PiPlusBold />} onClick={addIPAdapterLayer}>
+        <MenuItem icon={<PiPlusBold />} onClick={addIPALayer} isDisabled={isAddIPALayerDisabled}>
          {t('controlLayers.globalIPAdapterLayer')}
        </MenuItem>
      </MenuList>
@@ -1,11 +1,11 @@
import { Button, Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { guidanceLayerIPAdapterAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { useAddIPAdapterToIPALayer } from 'features/controlLayers/hooks/addLayerHooks';
import {
  isRegionalGuidanceLayer,
-  maskLayerNegativePromptChanged,
-  maskLayerPositivePromptChanged,
+  rgLayerNegativePromptChanged,
+  rgLayerPositivePromptChanged,
  selectControlLayersSlice,
} from 'features/controlLayers/store/controlLayersSlice';
import { useCallback, useMemo } from 'react';
@@ -19,6 +19,7 @@ type AddPromptButtonProps = {
export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
+  const [addIPAdapter, isAddIPAdapterDisabled] = useAddIPAdapterToIPALayer(layerId);
  const selectValidActions = useMemo(
    () =>
      createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
@@ -33,13 +34,10 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
  );
  const validActions = useAppSelector(selectValidActions);
  const addPositivePrompt = useCallback(() => {
-    dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' }));
+    dispatch(rgLayerPositivePromptChanged({ layerId, prompt: '' }));
  }, [dispatch, layerId]);
  const addNegativePrompt = useCallback(() => {
-    dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' }));
-  }, [dispatch, layerId]);
-  const addIPAdapter = useCallback(() => {
-    dispatch(guidanceLayerIPAdapterAdded(layerId));
+    dispatch(rgLayerNegativePromptChanged({ layerId, prompt: '' }));
  }, [dispatch, layerId]);

  return (
@@ -62,7 +60,13 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
        >
          {t('common.negativePrompt')}
        </Button>
-        <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addIPAdapter}>
+        <Button
+          size="sm"
+          variant="ghost"
+          leftIcon={<PiPlusBold />}
+          onClick={addIPAdapter}
+          isDisabled={isAddIPAdapterDisabled}
+        >
          {t('common.ipAdapter')}
        </Button>
      </Flex>
@@ -1,39 +1,22 @@
import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import CALayerOpacity from 'features/controlLayers/components/CALayerOpacity';
import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig';
import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton';
import { LayerMenu } from 'features/controlLayers/components/LayerMenu';
import { LayerTitle } from 'features/controlLayers/components/LayerTitle';
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle';
import {
  isControlAdapterLayer,
  layerSelected,
  selectControlLayersSlice,
} from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback, useMemo } from 'react';
import { assert } from 'tsafe';
import { CALayerControlAdapterWrapper } from 'features/controlLayers/components/CALayer/CALayerControlAdapterWrapper';
import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton';
import { LayerMenu } from 'features/controlLayers/components/LayerCommon/LayerMenu';
import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle';
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle';
import { layerSelected, selectCALayerOrThrow } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';

import CALayerOpacity from './CALayerOpacity';

type Props = {
  layerId: string;
};

-export const CALayerListItem = memo(({ layerId }: Props) => {
+export const CALayer = memo(({ layerId }: Props) => {
  const dispatch = useAppDispatch();
  const selector = useMemo(
    () =>
      createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
        const layer = controlLayers.present.layers.find((l) => l.id === layerId);
        assert(isControlAdapterLayer(layer), `Layer ${layerId} not found or not a ControlNet layer`);
        return {
          controlNetId: layer.controlNetId,
          isSelected: layerId === controlLayers.present.selectedLayerId,
        };
      }),
    [layerId]
  );
  const { controlNetId, isSelected } = useAppSelector(selector);
  const isSelected = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).isSelected);
  const onClickCapture = useCallback(() => {
    // Must be capture so that the layer is selected before deleting/resetting/etc
    dispatch(layerSelected(layerId));
@@ -60,7 +43,7 @@ export const CALayerListItem = memo(({ layerId }: Props) => {
        </Flex>
        {isOpen && (
          <Flex flexDir="column" gap={3} px={3} pb={3}>
-            <ControlAdapterLayerConfig id={controlNetId} />
+            <CALayerControlAdapterWrapper layerId={layerId} />
          </Flex>
        )}
      </Flex>
@@ -68,4 +51,4 @@
  );
});

-CALayerListItem.displayName = 'CALayerListItem';
+CALayer.displayName = 'CALayer';
@ -0,0 +1,121 @@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { ControlAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapter';
import {
  caLayerControlModeChanged,
  caLayerImageChanged,
  caLayerModelChanged,
  caLayerProcessorConfigChanged,
  caOrIPALayerBeginEndStepPctChanged,
  caOrIPALayerWeightChanged,
  selectCALayerOrThrow,
} from 'features/controlLayers/store/controlLayersSlice';
import type { ControlMode, ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { CALayerImageDropData } from 'features/dnd/types';
import { memo, useCallback, useMemo } from 'react';
import type {
  CALayerImagePostUploadAction,
  ControlNetModelConfig,
  ImageDTO,
  T2IAdapterModelConfig,
} from 'services/api/types';

type Props = {
  layerId: string;
};

export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
  const dispatch = useAppDispatch();
  const controlAdapter = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).controlAdapter);

  const onChangeBeginEndStepPct = useCallback(
    (beginEndStepPct: [number, number]) => {
      dispatch(
        caOrIPALayerBeginEndStepPctChanged({
          layerId,
          beginEndStepPct,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeControlMode = useCallback(
    (controlMode: ControlMode) => {
      dispatch(
        caLayerControlModeChanged({
          layerId,
          controlMode,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeWeight = useCallback(
    (weight: number) => {
      dispatch(caOrIPALayerWeightChanged({ layerId, weight }));
    },
    [dispatch, layerId]
  );

  const onChangeProcessorConfig = useCallback(
    (processorConfig: ProcessorConfig | null) => {
      dispatch(caLayerProcessorConfigChanged({ layerId, processorConfig }));
    },
    [dispatch, layerId]
  );

  const onChangeModel = useCallback(
    (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => {
      dispatch(
        caLayerModelChanged({
          layerId,
          modelConfig,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeImage = useCallback(
    (imageDTO: ImageDTO | null) => {
      dispatch(caLayerImageChanged({ layerId, imageDTO }));
    },
    [dispatch, layerId]
  );

  const droppableData = useMemo<CALayerImageDropData>(
    () => ({
      actionType: 'SET_CA_LAYER_IMAGE',
      context: {
        layerId,
      },
      id: layerId,
    }),
    [layerId]
  );

  const postUploadAction = useMemo<CALayerImagePostUploadAction>(
    () => ({
      layerId,
      type: 'SET_CA_LAYER_IMAGE',
    }),
    [layerId]
  );

  return (
    <ControlAdapter
      controlAdapter={controlAdapter}
      onChangeBeginEndStepPct={onChangeBeginEndStepPct}
      onChangeControlMode={onChangeControlMode}
      onChangeWeight={onChangeWeight}
      onChangeProcessorConfig={onChangeProcessorConfig}
      onChangeModel={onChangeModel}
      onChangeImage={onChangeImage}
      droppableData={droppableData}
      postUploadAction={postUploadAction}
    />
  );
});

CALayerControlAdapterWrapper.displayName = 'CALayerControlAdapterWrapper';
@ -15,7 +15,7 @@ import {
import { useAppDispatch } from 'app/store/storeHooks';
import { stopPropagation } from 'common/util/stopPropagation';
import { useLayerOpacity } from 'features/controlLayers/hooks/layerStateHooks';
import { isFilterEnabledChanged, layerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice';
import { caLayerIsFilterEnabledChanged, caLayerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@ -34,13 +34,13 @@ const CALayerOpacity = ({ layerId }: Props) => {
  const { opacity, isFilterEnabled } = useLayerOpacity(layerId);
  const onChangeOpacity = useCallback(
    (v: number) => {
      dispatch(layerOpacityChanged({ layerId, opacity: v / 100 }));
      dispatch(caLayerOpacityChanged({ layerId, opacity: v / 100 }));
    },
    [dispatch, layerId]
  );
  const onChangeFilter = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      dispatch(isFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked }));
      dispatch(caLayerIsFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked }));
    },
    [dispatch, layerId]
  );
@ -0,0 +1,117 @@
import { Box, Divider, Flex, Icon, IconButton } from '@invoke-ai/ui-library';
import { ControlAdapterModelCombobox } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox';
import type {
  ControlMode,
  ControlNetConfig,
  ProcessorConfig,
  T2IAdapterConfig,
} from 'features/controlLayers/util/controlAdapters';
import type { TypesafeDroppableData } from 'features/dnd/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretUpBold } from 'react-icons/pi';
import { useToggle } from 'react-use';
import type { ControlNetModelConfig, ImageDTO, PostUploadAction, T2IAdapterModelConfig } from 'services/api/types';

import { ControlAdapterBeginEndStepPct } from './ControlAdapterBeginEndStepPct';
import { ControlAdapterControlModeSelect } from './ControlAdapterControlModeSelect';
import { ControlAdapterImagePreview } from './ControlAdapterImagePreview';
import { ControlAdapterProcessorConfig } from './ControlAdapterProcessorConfig';
import { ControlAdapterProcessorTypeSelect } from './ControlAdapterProcessorTypeSelect';
import { ControlAdapterWeight } from './ControlAdapterWeight';

type Props = {
  controlAdapter: ControlNetConfig | T2IAdapterConfig;
  onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void;
  onChangeControlMode: (controlMode: ControlMode) => void;
  onChangeWeight: (weight: number) => void;
  onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void;
  onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const ControlAdapter = memo(
  ({
    controlAdapter,
    onChangeBeginEndStepPct,
    onChangeControlMode,
    onChangeWeight,
    onChangeProcessorConfig,
    onChangeModel,
    onChangeImage,
    droppableData,
    postUploadAction,
  }: Props) => {
    const { t } = useTranslation();
    const [isExpanded, toggleIsExpanded] = useToggle(false);

    return (
      <Flex flexDir="column" gap={3} position="relative" w="full">
        <Flex gap={3} alignItems="center" w="full">
          <Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
            <ControlAdapterModelCombobox modelKey={controlAdapter.model?.key ?? null} onChange={onChangeModel} />
          </Box>

          <IconButton
            size="sm"
            tooltip={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            aria-label={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            onClick={toggleIsExpanded}
            variant="ghost"
            icon={
              <Icon
                boxSize={4}
                as={PiCaretUpBold}
                transform={isExpanded ? 'rotate(0deg)' : 'rotate(180deg)'}
                transitionProperty="common"
                transitionDuration="normal"
              />
            }
          />
        </Flex>
        <Flex gap={3} w="full">
          <Flex flexDir="column" gap={3} w="full" h="full">
            {controlAdapter.type === 'controlnet' && (
              <ControlAdapterControlModeSelect
                controlMode={controlAdapter.controlMode}
                onChange={onChangeControlMode}
              />
            )}
            <ControlAdapterWeight weight={controlAdapter.weight} onChange={onChangeWeight} />
            <ControlAdapterBeginEndStepPct
              beginEndStepPct={controlAdapter.beginEndStepPct}
              onChange={onChangeBeginEndStepPct}
            />
          </Flex>
          <Flex alignItems="center" justifyContent="center" h={36} w={36} aspectRatio="1/1">
            <ControlAdapterImagePreview
              controlAdapter={controlAdapter}
              onChangeImage={onChangeImage}
              droppableData={droppableData}
              postUploadAction={postUploadAction}
            />
          </Flex>
        </Flex>
        {isExpanded && (
          <>
            <Divider />
            <Flex flexDir="column" gap={3} w="full">
              <ControlAdapterProcessorTypeSelect
                config={controlAdapter.processorConfig}
                onChange={onChangeProcessorConfig}
              />
              <ControlAdapterProcessorConfig
                config={controlAdapter.processorConfig}
                onChange={onChangeProcessorConfig}
              />
            </Flex>
          </>
        )}
      </Flex>
    );
  }
);

ControlAdapter.displayName = 'ControlAdapter';
@ -0,0 +1,43 @@
import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

type Props = {
  beginEndStepPct: [number, number];
  onChange: (beginEndStepPct: [number, number]) => void;
};

const formatPct = (v: number) => `${Math.round(v * 100)}%`;
const ariaLabel = ['Begin Step %', 'End Step %'];

export const ControlAdapterBeginEndStepPct = memo(({ beginEndStepPct, onChange }: Props) => {
  const { t } = useTranslation();
  const onReset = useCallback(() => {
    onChange([0, 1]);
  }, [onChange]);

  return (
    <FormControl orientation="horizontal">
      <InformationalPopover feature="controlNetBeginEnd">
        <FormLabel m={0}>{t('controlnet.beginEndStepPercentShort')}</FormLabel>
      </InformationalPopover>
      <CompositeRangeSlider
        aria-label={ariaLabel}
        value={beginEndStepPct}
        onChange={onChange}
        onReset={onReset}
        min={0}
        max={1}
        step={0.05}
        fineStep={0.01}
        minStepsBetweenThumbs={1}
        formatValue={formatPct}
        marks
        withThumbTooltip
      />
    </FormControl>
  );
});

ControlAdapterBeginEndStepPct.displayName = 'ControlAdapterBeginEndStepPct';
@ -1,24 +1,19 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterControlMode } from 'features/controlAdapters/hooks/useControlAdapterControlMode';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import { controlAdapterControlModeChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlMode } from 'features/controlAdapters/store/types';
import type { ControlMode } from 'features/controlLayers/util/controlAdapters';
import { isControlMode } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';

type Props = {
  id: string;
  controlMode: ControlMode;
  onChange: (controlMode: ControlMode) => void;
};

const ParamControlAdapterControlMode = ({ id }: Props) => {
  const isEnabled = useControlAdapterIsEnabled(id);
  const controlMode = useControlAdapterControlMode(id);
  const dispatch = useAppDispatch();
export const ControlAdapterControlModeSelect = memo(({ controlMode, onChange }: Props) => {
  const { t } = useTranslation();

  const CONTROL_MODE_DATA = useMemo(
    () => [
      { label: t('controlnet.balanced'), value: 'balanced' },
@ -31,17 +26,10 @@ const ParamControlAdapterControlMode = ({ id }: Props) => {

  const handleControlModeChange = useCallback<ComboboxOnChange>(
    (v) => {
      if (!v) {
        return;
      }
      dispatch(
        controlAdapterControlModeChanged({
          id,
          controlMode: v.value as ControlMode,
        })
      );
      assert(isControlMode(v?.value));
      onChange(v.value);
    },
    [id, dispatch]
    [onChange]
  );

  const value = useMemo(
@ -54,13 +42,19 @@ const ParamControlAdapterControlMode = ({ id }: Props) => {
  }

  return (
    <FormControl isDisabled={!isEnabled}>
    <FormControl>
      <InformationalPopover feature="controlNetControlMode">
        <FormLabel m={0}>{t('controlnet.control')}</FormLabel>
      </InformationalPopover>
      <Combobox value={value} options={CONTROL_MODE_DATA} onChange={handleControlModeChange} />
      <Combobox
        value={value}
        options={CONTROL_MODE_DATA}
        onChange={handleControlModeChange}
        isClearable={false}
        isSearchable={false}
      />
    </FormControl>
  );
};
});

export default memo(ParamControlAdapterControlMode);
ControlAdapterControlModeSelect.displayName = 'ControlAdapterControlModeSelect';
@ -0,0 +1,215 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Box, Flex, Spinner, useShiftModifier } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ControlNetConfig, T2IAdapterConfig } from 'features/controlLayers/util/controlAdapters';
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiFloppyDiskBold, PiRulerBold } from 'react-icons/pi';
import {
  useAddImageToBoardMutation,
  useChangeImageIsIntermediateMutation,
  useGetImageDTOQuery,
  useRemoveImageFromBoardMutation,
} from 'services/api/endpoints/images';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

type Props = {
  controlAdapter: ControlNetConfig | T2IAdapterConfig;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const ControlAdapterImagePreview = memo(
  ({ controlAdapter, onChangeImage, droppableData, postUploadAction }: Props) => {
    const { t } = useTranslation();
    const dispatch = useAppDispatch();
    const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
    const isConnected = useAppSelector((s) => s.system.isConnected);
    const activeTabName = useAppSelector(activeTabNameSelector);
    const optimalDimension = useAppSelector(selectOptimalDimension);
    const shift = useShiftModifier();

    const [isMouseOverImage, setIsMouseOverImage] = useState(false);

    const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery(
      controlAdapter.image?.imageName ?? skipToken
    );
    const { currentData: processedControlImage, isError: isErrorProcessedControlImage } = useGetImageDTOQuery(
      controlAdapter.processedImage?.imageName ?? skipToken
    );

    const [changeIsIntermediate] = useChangeImageIsIntermediateMutation();
    const [addToBoard] = useAddImageToBoardMutation();
    const [removeFromBoard] = useRemoveImageFromBoardMutation();
    const handleResetControlImage = useCallback(() => {
      onChangeImage(null);
    }, [onChangeImage]);

    const handleSaveControlImage = useCallback(async () => {
      if (!processedControlImage) {
        return;
      }

      await changeIsIntermediate({
        imageDTO: processedControlImage,
        is_intermediate: false,
      }).unwrap();

      if (autoAddBoardId !== 'none') {
        addToBoard({
          imageDTO: processedControlImage,
          board_id: autoAddBoardId,
        });
      } else {
        removeFromBoard({ imageDTO: processedControlImage });
      }
    }, [processedControlImage, changeIsIntermediate, autoAddBoardId, addToBoard, removeFromBoard]);

    const handleSetControlImageToDimensions = useCallback(() => {
      if (!controlImage) {
        return;
      }

      if (activeTabName === 'unifiedCanvas') {
        dispatch(
          setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension)
        );
      } else {
        if (shift) {
          const { width, height } = controlImage;
          dispatch(widthChanged({ width, updateAspectRatio: true }));
          dispatch(heightChanged({ height, updateAspectRatio: true }));
        } else {
          const { width, height } = calculateNewSize(
            controlImage.width / controlImage.height,
            optimalDimension * optimalDimension
          );
          dispatch(widthChanged({ width, updateAspectRatio: true }));
          dispatch(heightChanged({ height, updateAspectRatio: true }));
        }
      }
    }, [controlImage, activeTabName, dispatch, optimalDimension, shift]);

    const handleMouseEnter = useCallback(() => {
      setIsMouseOverImage(true);
    }, []);

    const handleMouseLeave = useCallback(() => {
      setIsMouseOverImage(false);
    }, []);

    const draggableData = useMemo<ImageDraggableData | undefined>(() => {
      if (controlImage) {
        return {
          id: controlAdapter.id,
          payloadType: 'IMAGE_DTO',
          payload: { imageDTO: controlImage },
        };
      }
    }, [controlImage, controlAdapter.id]);

    const shouldShowProcessedImage =
      controlImage &&
      processedControlImage &&
      !isMouseOverImage &&
      !controlAdapter.isProcessingImage &&
      controlAdapter.processorConfig !== null;

    useEffect(() => {
      if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) {
        handleResetControlImage();
      }
    }, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]);

    return (
      <Flex
        onMouseEnter={handleMouseEnter}
        onMouseLeave={handleMouseLeave}
        position="relative"
        w="full"
        h={36}
        alignItems="center"
        justifyContent="center"
      >
        <IAIDndImage
          draggableData={draggableData}
          droppableData={droppableData}
          imageDTO={controlImage}
          isDropDisabled={shouldShowProcessedImage}
          postUploadAction={postUploadAction}
        />

        <Box
          position="absolute"
          top={0}
          insetInlineStart={0}
          w="full"
          h="full"
          opacity={shouldShowProcessedImage ? 1 : 0}
          transitionProperty="common"
          transitionDuration="normal"
          pointerEvents="none"
        >
          <IAIDndImage
            draggableData={draggableData}
            droppableData={droppableData}
            imageDTO={processedControlImage}
            isUploadDisabled={true}
          />
        </Box>

        <>
          <IAIDndImageIcon
            onClick={handleResetControlImage}
            icon={controlImage ? <PiArrowCounterClockwiseBold size={16} /> : undefined}
            tooltip={t('controlnet.resetControlImage')}
          />
          <IAIDndImageIcon
            onClick={handleSaveControlImage}
            icon={controlImage ? <PiFloppyDiskBold size={16} /> : undefined}
            tooltip={t('controlnet.saveControlImage')}
            styleOverrides={saveControlImageStyleOverrides}
          />
          <IAIDndImageIcon
            onClick={handleSetControlImageToDimensions}
            icon={controlImage ? <PiRulerBold size={16} /> : undefined}
            tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')}
            styleOverrides={setControlImageDimensionsStyleOverrides}
          />
        </>

        {controlAdapter.isProcessingImage && (
          <Flex
            position="absolute"
            top={0}
            insetInlineStart={0}
            w="full"
            h="full"
            alignItems="center"
            justifyContent="center"
            opacity={0.8}
            borderRadius="base"
            bg="base.900"
          >
            <Spinner size="xl" color="base.400" />
          </Flex>
        )}
      </Flex>
    );
  }
);

ControlAdapterImagePreview.displayName = 'ControlAdapterImagePreview';

const saveControlImageStyleOverrides: SystemStyleObject = { mt: 6 };
const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 12 };
@ -0,0 +1,62 @@
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useControlNetAndT2IAdapterModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';

type Props = {
  modelKey: string | null;
  onChange: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
};

export const ControlAdapterModelCombobox = memo(({ modelKey, onChange: onChangeModel }: Props) => {
  const { t } = useTranslation();
  const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
  const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
  const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

  const _onChange = useCallback(
    (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | null) => {
      if (!modelConfig) {
        return;
      }
      onChangeModel(modelConfig);
    },
    [onChangeModel]
  );

  const getIsDisabled = useCallback(
    (model: AnyModelConfig): boolean => {
      const isCompatible = currentBaseModel === model.base;
      const hasMainModel = Boolean(currentBaseModel);
      return !hasMainModel || !isCompatible;
    },
    [currentBaseModel]
  );

  const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
    modelConfigs,
    onChange: _onChange,
    selectedModel,
    getIsDisabled,
    isLoading,
  });

  return (
    <Tooltip label={selectedModel?.description}>
      <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
        <Combobox
          options={options}
          placeholder={t('controlnet.selectModel')}
          value={value}
          onChange={onChange}
          noOptionsMessage={noOptionsMessage}
        />
      </FormControl>
    </Tooltip>
  );
});

ControlAdapterModelCombobox.displayName = 'ControlAdapterModelCombobox';
@ -0,0 +1,85 @@
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { memo } from 'react';

import { CannyProcessor } from './processors/CannyProcessor';
import { ColorMapProcessor } from './processors/ColorMapProcessor';
import { ContentShuffleProcessor } from './processors/ContentShuffleProcessor';
import { DepthAnythingProcessor } from './processors/DepthAnythingProcessor';
import { DWOpenposeProcessor } from './processors/DWOpenposeProcessor';
import { HedProcessor } from './processors/HedProcessor';
import { LineartProcessor } from './processors/LineartProcessor';
import { MediapipeFaceProcessor } from './processors/MediapipeFaceProcessor';
import { MidasDepthProcessor } from './processors/MidasDepthProcessor';
import { MlsdImageProcessor } from './processors/MlsdImageProcessor';
import { PidiProcessor } from './processors/PidiProcessor';

type Props = {
  config: ProcessorConfig | null;
  onChange: (config: ProcessorConfig | null) => void;
};

export const ControlAdapterProcessorConfig = memo(({ config, onChange }: Props) => {
  if (!config) {
    return null;
  }

  if (config.type === 'canny_image_processor') {
    return <CannyProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'color_map_image_processor') {
    return <ColorMapProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'depth_anything_image_processor') {
    return <DepthAnythingProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'hed_image_processor') {
    return <HedProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'lineart_image_processor') {
    return <LineartProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'content_shuffle_image_processor') {
    return <ContentShuffleProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'lineart_anime_image_processor') {
    // No configurable options for this processor
    return null;
  }

  if (config.type === 'mediapipe_face_processor') {
    return <MediapipeFaceProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'midas_depth_image_processor') {
    return <MidasDepthProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'mlsd_image_processor') {
    return <MlsdImageProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'normalbae_image_processor') {
    // No configurable options for this processor
    return null;
  }

  if (config.type === 'dw_openpose_image_processor') {
    return <DWOpenposeProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'pidi_image_processor') {
    return <PidiProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'zoe_depth_image_processor') {
    return null;
  }
});

ControlAdapterProcessorConfig.displayName = 'ControlAdapterProcessorConfig';
@ -0,0 +1,70 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, FormLabel, IconButton } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS, isProcessorType } from 'features/controlLayers/util/controlAdapters';
import { configSelector } from 'features/system/store/configSelectors';
import { includes, map } from 'lodash-es';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';
import { assert } from 'tsafe';

type Props = {
  config: ProcessorConfig | null;
  onChange: (config: ProcessorConfig | null) => void;
};

const selectDisabledProcessors = createMemoizedSelector(
  configSelector,
  (config) => config.sd.disabledControlNetProcessors
);

export const ControlAdapterProcessorTypeSelect = memo(({ config, onChange }: Props) => {
  const { t } = useTranslation();
  const disabledProcessors = useAppSelector(selectDisabledProcessors);
  const options = useMemo(() => {
    return map(CONTROLNET_PROCESSORS, ({ labelTKey }, type) => ({ value: type, label: t(labelTKey) })).filter(
      (o) => !includes(disabledProcessors, o.value)
    );
  }, [disabledProcessors, t]);

  const _onChange = useCallback<ComboboxOnChange>(
    (v) => {
      if (!v) {
        onChange(null);
      } else {
        assert(isProcessorType(v.value));
        onChange(CONTROLNET_PROCESSORS[v.value].buildDefaults());
      }
    },
    [onChange]
  );
  const clearProcessor = useCallback(() => {
    onChange(null);
  }, [onChange]);
  const value = useMemo(() => options.find((o) => o.value === config?.type) ?? null, [options, config?.type]);

  return (
    <Flex gap={2}>
      <FormControl>
        <InformationalPopover feature="controlNetProcessor">
          <FormLabel m={0}>{t('controlnet.processor')}</FormLabel>
        </InformationalPopover>
        <Combobox value={value} options={options} onChange={_onChange} isSearchable={false} isClearable={false} />
      </FormControl>
      <IconButton
        aria-label={t('controlLayers.clearProcessor')}
        onClick={clearProcessor}
        isDisabled={!config}
        icon={<PiXBold />}
        variant="ghost"
        size="sm"
      />
    </Flex>
  );
});

ControlAdapterProcessorTypeSelect.displayName = 'ControlAdapterProcessorTypeSelect';
@ -1,24 +1,19 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import { useControlAdapterWeight } from 'features/controlAdapters/hooks/useControlAdapterWeight';
import { controlAdapterWeightChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
import { isNil } from 'lodash-es';
import { memo, useCallback } from 'react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

type ParamControlAdapterWeightProps = {
  id: string;
type Props = {
  weight: number;
  onChange: (weight: number) => void;
};

const formatValue = (v: number) => v.toFixed(2);
const marks = [0, 1, 2];

const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => {
export const ControlAdapterWeight = memo(({ weight, onChange }: Props) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const isEnabled = useControlAdapterIsEnabled(id);
  const weight = useControlAdapterWeight(id);
  const initial = useAppSelector((s) => s.config.sd.ca.weight.initial);
  const sliderMin = useAppSelector((s) => s.config.sd.ca.weight.sliderMin);
  const sliderMax = useAppSelector((s) => s.config.sd.ca.weight.sliderMax);
@ -27,20 +22,8 @@ const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => {
  const coarseStep = useAppSelector((s) => s.config.sd.ca.weight.coarseStep);
  const fineStep = useAppSelector((s) => s.config.sd.ca.weight.fineStep);

  const onChange = useCallback(
    (weight: number) => {
      dispatch(controlAdapterWeightChanged({ id, weight }));
    },
    [dispatch, id]
  );

  if (isNil(weight)) {
    // should never happen
    return null;
  }

  return (
    <FormControl isDisabled={!isEnabled} orientation="horizontal">
    <FormControl orientation="horizontal">
      <InformationalPopover feature="controlNetWeight">
        <FormLabel m={0}>{t('controlnet.weight')}</FormLabel>
      </InformationalPopover>
@ -67,8 +50,6 @@ const ParamControlAdapterWeight = ({ id }: ParamControlAdapterWeightProps) => {
      />
    </FormControl>
  );
};
});

export default memo(ParamControlAdapterWeight);

const marks = [0, 1, 2];
ControlAdapterWeight.displayName = 'ControlAdapterWeight';
@ -0,0 +1,72 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { ControlAdapterBeginEndStepPct } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct';
import { ControlAdapterWeight } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight';
import { IPAdapterImagePreview } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod';
import { IPAdapterModelSelect } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect';
import type { CLIPVisionModel, IPAdapterConfig, IPMethod } from 'features/controlLayers/util/controlAdapters';
import type { TypesafeDroppableData } from 'features/dnd/types';
import { memo } from 'react';
import type { ImageDTO, IPAdapterModelConfig, PostUploadAction } from 'services/api/types';

type Props = {
  ipAdapter: IPAdapterConfig;
  onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void;
  onChangeWeight: (weight: number) => void;
  onChangeIPMethod: (method: IPMethod) => void;
  onChangeModel: (modelConfig: IPAdapterModelConfig) => void;
  onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModel) => void;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const IPAdapter = memo(
  ({
    ipAdapter,
    onChangeBeginEndStepPct,
    onChangeWeight,
    onChangeIPMethod,
    onChangeModel,
    onChangeCLIPVisionModel,
    onChangeImage,
    droppableData,
    postUploadAction,
  }: Props) => {
    return (
      <Flex flexDir="column" gap={4} position="relative" w="full">
        <Flex gap={3} alignItems="center" w="full">
          <Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
            <IPAdapterModelSelect
              modelKey={ipAdapter.model?.key ?? null}
              onChangeModel={onChangeModel}
              clipVisionModel={ipAdapter.clipVisionModel}
              onChangeCLIPVisionModel={onChangeCLIPVisionModel}
            />
          </Box>
        </Flex>
        <Flex gap={4} w="full" alignItems="center">
          <Flex flexDir="column" gap={3} w="full">
            <IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />
            <ControlAdapterWeight weight={ipAdapter.weight} onChange={onChangeWeight} />
            <ControlAdapterBeginEndStepPct
              beginEndStepPct={ipAdapter.beginEndStepPct}
              onChange={onChangeBeginEndStepPct}
            />
          </Flex>
          <Flex alignItems="center" justifyContent="center" h={36} w={36} aspectRatio="1/1">
            <IPAdapterImagePreview
              image={ipAdapter.image}
              onChangeImage={onChangeImage}
              ipAdapterId={ipAdapter.id}
              droppableData={droppableData}
              postUploadAction={postUploadAction}
            />
          </Flex>
        </Flex>
      </Flex>
    );
  }
);

IPAdapter.displayName = 'IPAdapter';
@ -0,0 +1,114 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, useShiftModifier } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ImageWithDims } from 'features/controlLayers/util/controlAdapters';
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

type Props = {
  image: ImageWithDims | null;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  ipAdapterId: string; // required for the dnd/upload interactions
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const IPAdapterImagePreview = memo(
  ({ image, onChangeImage, ipAdapterId, droppableData, postUploadAction }: Props) => {
    const { t } = useTranslation();
    const dispatch = useAppDispatch();
    const isConnected = useAppSelector((s) => s.system.isConnected);
    const activeTabName = useAppSelector(activeTabNameSelector);
    const optimalDimension = useAppSelector(selectOptimalDimension);
    const shift = useShiftModifier();

    const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery(
      image?.imageName ?? skipToken
    );
    const handleResetControlImage = useCallback(() => {
      onChangeImage(null);
    }, [onChangeImage]);

    const handleSetControlImageToDimensions = useCallback(() => {
      if (!controlImage) {
        return;
      }

      if (activeTabName === 'unifiedCanvas') {
        dispatch(
          setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension)
        );
      } else {
        if (shift) {
          const { width, height } = controlImage;
          dispatch(widthChanged({ width, updateAspectRatio: true }));
          dispatch(heightChanged({ height, updateAspectRatio: true }));
        } else {
          const { width, height } = calculateNewSize(
            controlImage.width / controlImage.height,
            optimalDimension * optimalDimension
          );
          dispatch(widthChanged({ width, updateAspectRatio: true }));
          dispatch(heightChanged({ height, updateAspectRatio: true }));
        }
      }
    }, [controlImage, activeTabName, dispatch, optimalDimension, shift]);

    const draggableData = useMemo<ImageDraggableData | undefined>(() => {
      if (controlImage) {
        return {
          id: ipAdapterId,
          payloadType: 'IMAGE_DTO',
          payload: { imageDTO: controlImage },
        };
      }
    }, [controlImage, ipAdapterId]);

    useEffect(() => {
      if (isConnected && isErrorControlImage) {
        handleResetControlImage();
      }
    }, [handleResetControlImage, isConnected, isErrorControlImage]);

    return (
      <Flex position="relative" w="full" h={36} alignItems="center" justifyContent="center">
        <IAIDndImage
          draggableData={draggableData}
          droppableData={droppableData}
          imageDTO={controlImage}
          postUploadAction={postUploadAction}
        />

        <>
          <IAIDndImageIcon
            onClick={handleResetControlImage}
            icon={controlImage ? <PiArrowCounterClockwiseBold size={16} /> : undefined}
            tooltip={t('controlnet.resetControlImage')}
          />
          <IAIDndImageIcon
            onClick={handleSetControlImageToDimensions}
            icon={controlImage ? <PiRulerBold size={16} /> : undefined}
            tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')}
            styleOverrides={setControlImageDimensionsStyleOverrides}
          />
        </>
      </Flex>
    );
  }
);

IPAdapterImagePreview.displayName = 'IPAdapterImagePreview';

const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 6 };
@ -0,0 +1,44 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { IPMethod } from 'features/controlLayers/util/controlAdapters';
import { isIPMethod } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';

type Props = {
  method: IPMethod;
  onChange: (method: IPMethod) => void;
};

export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
  const { t } = useTranslation();
  const options: { label: string; value: IPMethod }[] = useMemo(
    () => [
      { label: t('controlnet.full'), value: 'full' },
      { label: `${t('controlnet.style')} (${t('common.beta')})`, value: 'style' },
      { label: `${t('controlnet.composition')} (${t('common.beta')})`, value: 'composition' },
    ],
    [t]
  );
  const _onChange = useCallback<ComboboxOnChange>(
    (v) => {
      assert(isIPMethod(v?.value));
      onChange(v.value);
    },
    [onChange]
  );
  const value = useMemo(() => options.find((o) => o.value === method), [options, method]);

  return (
    <FormControl>
      <InformationalPopover feature="ipAdapterMethod">
        <FormLabel>{t('controlnet.ipAdapterMethod')}</FormLabel>
      </InformationalPopover>
      <Combobox value={value} options={options} onChange={_onChange} />
    </FormControl>
  );
});

IPAdapterMethod.displayName = 'IPAdapterMethod';
@ -0,0 +1,100 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import type { CLIPVisionModel } from 'features/controlLayers/util/controlAdapters';
import { isCLIPVisionModel } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useIPAdapterModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

const CLIP_VISION_OPTIONS = [
  { label: 'ViT-H', value: 'ViT-H' },
  { label: 'ViT-G', value: 'ViT-G' },
];

type Props = {
  modelKey: string | null;
  onChangeModel: (modelConfig: IPAdapterModelConfig) => void;
  clipVisionModel: CLIPVisionModel;
  onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModel) => void;
};

export const IPAdapterModelSelect = memo(
  ({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => {
    const { t } = useTranslation();
    const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
    const [modelConfigs, { isLoading }] = useIPAdapterModels();
    const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

    const _onChangeModel = useCallback(
      (modelConfig: IPAdapterModelConfig | null) => {
        if (!modelConfig) {
          return;
        }
        onChangeModel(modelConfig);
      },
      [onChangeModel]
    );

    const _onChangeCLIPVisionModel = useCallback<ComboboxOnChange>(
      (v) => {
        assert(isCLIPVisionModel(v?.value));
        onChangeCLIPVisionModel(v.value);
      },
      [onChangeCLIPVisionModel]
    );

    const getIsDisabled = useCallback(
      (model: AnyModelConfig): boolean => {
        const isCompatible = currentBaseModel === model.base;
        const hasMainModel = Boolean(currentBaseModel);
        return !hasMainModel || !isCompatible;
      },
      [currentBaseModel]
    );

    const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
      modelConfigs,
      onChange: _onChangeModel,
      selectedModel,
      getIsDisabled,
      isLoading,
    });

    const clipVisionModelValue = useMemo(
      () => CLIP_VISION_OPTIONS.find((o) => o.value === clipVisionModel),
      [clipVisionModel]
    );

    return (
      <Flex gap={4}>
        <Tooltip label={selectedModel?.description}>
          <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
            <Combobox
              options={options}
              placeholder={t('controlnet.selectModel')}
              value={value}
              onChange={onChange}
              noOptionsMessage={noOptionsMessage}
            />
          </FormControl>
        </Tooltip>
        {selectedModel?.format === 'checkpoint' && (
          <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} width="max-content" minWidth={28}>
            <Combobox
              options={CLIP_VISION_OPTIONS}
              placeholder={t('controlnet.selectCLIPVisionModel')}
              value={clipVisionModelValue}
              onChange={_onChangeCLIPVisionModel}
            />
          </FormControl>
        )}
      </Flex>
    );
  }
);

IPAdapterModelSelect.displayName = 'IPAdapterModelSelect';
@ -0,0 +1,67 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { type CannyProcessorConfig, CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<CannyProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['canny_image_processor'].buildDefaults();

export const CannyProcessor = ({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleLowThresholdChanged = useCallback(
    (v: number) => {
      onChange({ ...config, low_threshold: v });
    },
    [onChange, config]
  );
  const handleHighThresholdChanged = useCallback(
    (v: number) => {
      onChange({ ...config, high_threshold: v });
    },
    [onChange, config]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.lowThreshold')}</FormLabel>
        <CompositeSlider
          value={config.low_threshold}
          onChange={handleLowThresholdChanged}
          defaultValue={DEFAULTS.low_threshold}
          min={0}
          max={255}
        />
        <CompositeNumberInput
          value={config.low_threshold}
          onChange={handleLowThresholdChanged}
          defaultValue={DEFAULTS.low_threshold}
          min={0}
          max={255}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.highThreshold')}</FormLabel>
        <CompositeSlider
          value={config.high_threshold}
          onChange={handleHighThresholdChanged}
          defaultValue={DEFAULTS.high_threshold}
          min={0}
          max={255}
        />
        <CompositeNumberInput
          value={config.high_threshold}
          onChange={handleHighThresholdChanged}
          defaultValue={DEFAULTS.high_threshold}
          min={0}
          max={255}
        />
      </FormControl>
    </ProcessorWrapper>
  );
};

CannyProcessor.displayName = 'CannyProcessor';
@ -0,0 +1,47 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { type ColorMapProcessorConfig, CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<ColorMapProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['color_map_image_processor'].buildDefaults();

export const ColorMapProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleColorMapTileSizeChanged = useCallback(
    (v: number) => {
      onChange({ ...config, color_map_tile_size: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.colorMapTileSize')}</FormLabel>
        <CompositeSlider
          value={config.color_map_tile_size}
          defaultValue={DEFAULTS.color_map_tile_size}
          onChange={handleColorMapTileSizeChanged}
          min={1}
          max={256}
          step={1}
          marks
        />
        <CompositeNumberInput
          value={config.color_map_tile_size}
          defaultValue={DEFAULTS.color_map_tile_size}
          onChange={handleColorMapTileSizeChanged}
          min={1}
          max={4096}
          step={1}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

ColorMapProcessor.displayName = 'ColorMapProcessor';
@ -0,0 +1,79 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { ContentShuffleProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<ContentShuffleProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['content_shuffle_image_processor'].buildDefaults();

export const ContentShuffleProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleWChanged = useCallback(
    (v: number) => {
      onChange({ ...config, w: v });
    },
    [config, onChange]
  );

  const handleHChanged = useCallback(
    (v: number) => {
      onChange({ ...config, h: v });
    },
    [config, onChange]
  );

  const handleFChanged = useCallback(
    (v: number) => {
      onChange({ ...config, f: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.w')}</FormLabel>
        <CompositeSlider
          value={config.w}
          defaultValue={DEFAULTS.w}
          onChange={handleWChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.w} defaultValue={DEFAULTS.w} onChange={handleWChanged} min={0} max={4096} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.h')}</FormLabel>
        <CompositeSlider
          value={config.h}
          defaultValue={DEFAULTS.h}
          onChange={handleHChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.h} defaultValue={DEFAULTS.h} onChange={handleHChanged} min={0} max={4096} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.f')}</FormLabel>
        <CompositeSlider
          value={config.f}
          defaultValue={DEFAULTS.f}
          onChange={handleFChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.f} defaultValue={DEFAULTS.f} onChange={handleFChanged} min={0} max={4096} />
      </FormControl>
    </ProcessorWrapper>
  );
});

ContentShuffleProcessor.displayName = 'ContentShuffleProcessor';
@ -0,0 +1,62 @@
import { Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { DWOpenposeProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<DWOpenposeProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['dw_openpose_image_processor'].buildDefaults();

export const DWOpenposeProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleDrawBodyChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_body: e.target.checked });
    },
    [config, onChange]
  );

  const handleDrawFaceChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_face: e.target.checked });
    },
    [config, onChange]
  );

  const handleDrawHandsChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_hands: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <Flex sx={{ flexDir: 'row', gap: 6 }}>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.body')}</FormLabel>
          <Switch defaultChecked={DEFAULTS.draw_body} isChecked={config.draw_body} onChange={handleDrawBodyChanged} />
        </FormControl>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.face')}</FormLabel>
          <Switch defaultChecked={DEFAULTS.draw_face} isChecked={config.draw_face} onChange={handleDrawFaceChanged} />
        </FormControl>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.hands')}</FormLabel>
          <Switch
            defaultChecked={DEFAULTS.draw_hands}
            isChecked={config.draw_hands}
            onChange={handleDrawHandsChanged}
          />
        </FormControl>
      </Flex>
    </ProcessorWrapper>
  );
});

DWOpenposeProcessor.displayName = 'DWOpenposeProcessor';
@ -0,0 +1,52 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { DepthAnythingModelSize, DepthAnythingProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS, isDepthAnythingModelSize } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<DepthAnythingProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['depth_anything_image_processor'].buildDefaults();

export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleModelSizeChange = useCallback<ComboboxOnChange>(
    (v) => {
      if (!isDepthAnythingModelSize(v?.value)) {
        return;
      }
      onChange({ ...config, model_size: v.value });
    },
    [config, onChange]
  );

  const options: { label: string; value: DepthAnythingModelSize }[] = useMemo(
    () => [
      { label: t('controlnet.small'), value: 'small' },
      { label: t('controlnet.base'), value: 'base' },
      { label: t('controlnet.large'), value: 'large' },
    ],
    [t]
  );

  const value = useMemo(() => options.filter((o) => o.value === config.model_size)[0], [options, config.model_size]);

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.modelSize')}</FormLabel>
        <Combobox
          value={value}
          defaultInputValue={DEFAULTS.model_size}
          options={options}
          onChange={handleModelSizeChange}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

DepthAnythingProcessor.displayName = 'DepthAnythingProcessor';

@ -0,0 +1,32 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { HedProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<HedProcessorConfig>;

export const HedProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleScribbleChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, scribble: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.scribble')}</FormLabel>
        <Switch isChecked={config.scribble} onChange={handleScribbleChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
});

HedProcessor.displayName = 'HedProcessor';

@ -0,0 +1,32 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { LineartProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<LineartProcessorConfig>;

export const LineartProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleCoarseChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, coarse: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.coarse')}</FormLabel>
        <Switch isChecked={config.coarse} onChange={handleCoarseChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
});

LineartProcessor.displayName = 'LineartProcessor';

@ -0,0 +1,73 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { CONTROLNET_PROCESSORS, type MediapipeFaceProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MediapipeFaceProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['mediapipe_face_processor'].buildDefaults();

export const MediapipeFaceProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleMaxFacesChanged = useCallback(
    (v: number) => {
      onChange({ ...config, max_faces: v });
    },
    [config, onChange]
  );

  const handleMinConfidenceChanged = useCallback(
    (v: number) => {
      onChange({ ...config, min_confidence: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.maxFaces')}</FormLabel>
        <CompositeSlider
          value={config.max_faces}
          onChange={handleMaxFacesChanged}
          defaultValue={DEFAULTS.max_faces}
          min={1}
          max={20}
          marks
        />
        <CompositeNumberInput
          value={config.max_faces}
          onChange={handleMaxFacesChanged}
          defaultValue={DEFAULTS.max_faces}
          min={1}
          max={20}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.minConfidence')}</FormLabel>
        <CompositeSlider
          value={config.min_confidence}
          onChange={handleMinConfidenceChanged}
          defaultValue={DEFAULTS.min_confidence}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.min_confidence}
          onChange={handleMinConfidenceChanged}
          defaultValue={DEFAULTS.min_confidence}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MediapipeFaceProcessor.displayName = 'MediapipeFaceProcessor';

@ -0,0 +1,76 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { MidasDepthProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MidasDepthProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['midas_depth_image_processor'].buildDefaults();

export const MidasDepthProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleAMultChanged = useCallback(
    (v: number) => {
      onChange({ ...config, a_mult: v });
    },
    [config, onChange]
  );

  const handleBgThChanged = useCallback(
    (v: number) => {
      onChange({ ...config, bg_th: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.amult')}</FormLabel>
        <CompositeSlider
          value={config.a_mult}
          onChange={handleAMultChanged}
          defaultValue={DEFAULTS.a_mult}
          min={0}
          max={20}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.a_mult}
          onChange={handleAMultChanged}
          defaultValue={DEFAULTS.a_mult}
          min={0}
          max={20}
          step={0.01}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.bgth')}</FormLabel>
        <CompositeSlider
          value={config.bg_th}
          onChange={handleBgThChanged}
          defaultValue={DEFAULTS.bg_th}
          min={0}
          max={20}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.bg_th}
          onChange={handleBgThChanged}
          defaultValue={DEFAULTS.bg_th}
          min={0}
          max={20}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MidasDepthProcessor.displayName = 'MidasDepthProcessor';

@ -0,0 +1,76 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { MlsdProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CONTROLNET_PROCESSORS } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MlsdProcessorConfig>;
const DEFAULTS = CONTROLNET_PROCESSORS['mlsd_image_processor'].buildDefaults();

export const MlsdImageProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleThrDChanged = useCallback(
    (v: number) => {
      onChange({ ...config, thr_d: v });
    },
    [config, onChange]
  );

  const handleThrVChanged = useCallback(
    (v: number) => {
      onChange({ ...config, thr_v: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.w')} </FormLabel>
        <CompositeSlider
          value={config.thr_d}
          onChange={handleThrDChanged}
          defaultValue={DEFAULTS.thr_d}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.thr_d}
          onChange={handleThrDChanged}
          defaultValue={DEFAULTS.thr_d}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.h')} </FormLabel>
        <CompositeSlider
          value={config.thr_v}
          onChange={handleThrVChanged}
          defaultValue={DEFAULTS.thr_v}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.thr_v}
          onChange={handleThrVChanged}
          defaultValue={DEFAULTS.thr_v}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MlsdImageProcessor.displayName = 'MlsdImageProcessor';

@ -0,0 +1,43 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { PidiProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<PidiProcessorConfig>;

export const PidiProcessor = ({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleScribbleChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, scribble: e.target.checked });
    },
    [config, onChange]
  );

  const handleSafeChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, safe: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.scribble')}</FormLabel>
        <Switch isChecked={config.scribble} onChange={handleScribbleChanged} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.safe')}</FormLabel>
        <Switch isChecked={config.safe} onChange={handleSafeChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
};

PidiProcessor.displayName = 'PidiProcessor';

@ -0,0 +1,15 @@
import { Flex } from '@invoke-ai/ui-library';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';

type Props = PropsWithChildren;

const ProcessorWrapper = (props: Props) => {
  return (
    <Flex flexDir="column" gap={3}>
      {props.children}
    </Flex>
  );
};

export default memo(ProcessorWrapper);

@ -0,0 +1,6 @@
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';

export type ProcessorComponentProps<T extends ProcessorConfig> = {
  onChange: (config: T) => void;
  config: T;
};

@ -4,10 +4,10 @@ import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { AddLayerButton } from 'features/controlLayers/components/AddLayerButton';
import { CALayerListItem } from 'features/controlLayers/components/CALayerListItem';
import { CALayer } from 'features/controlLayers/components/CALayer/CALayer';
import { DeleteAllLayersButton } from 'features/controlLayers/components/DeleteAllLayersButton';
import { IPLayerListItem } from 'features/controlLayers/components/IPLayerListItem';
import { RGLayerListItem } from 'features/controlLayers/components/RGLayerListItem';
import { IPALayer } from 'features/controlLayers/components/IPALayer/IPALayer';
import { RGLayer } from 'features/controlLayers/components/RGLayer/RGLayer';
import { isRenderableLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
import type { Layer } from 'features/controlLayers/store/types';
import { partition } from 'lodash-es';
@ -46,13 +46,13 @@ type LayerWrapperProps = {

const LayerWrapper = memo(({ id, type }: LayerWrapperProps) => {
  if (type === 'regional_guidance_layer') {
    return <RGLayerListItem key={id} layerId={id} />;
    return <RGLayer key={id} layerId={id} />;
  }
  if (type === 'control_adapter_layer') {
    return <CALayerListItem key={id} layerId={id} />;
    return <CALayer key={id} layerId={id} />;
  }
  if (type === 'ip_adapter_layer') {
    return <IPLayerListItem key={id} layerId={id} />;
    return <IPALayer key={id} layerId={id} />;
  }
});

@ -1,6 +1,6 @@
import { Button } from '@invoke-ai/ui-library';
import { allLayersDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
import { useAppDispatch } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allLayersDeleted } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleBold } from 'react-icons/pi';
@ -8,12 +8,19 @@ import { PiTrashSimpleBold } from 'react-icons/pi';
export const DeleteAllLayersButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const isDisabled = useAppSelector((s) => s.controlLayers.present.layers.length === 0);
  const onClick = useCallback(() => {
    dispatch(allLayersDeleted());
  }, [dispatch]);

  return (
    <Button onClick={onClick} leftIcon={<PiTrashSimpleBold />} variant="ghost" colorScheme="error">
    <Button
      onClick={onClick}
      leftIcon={<PiTrashSimpleBold />}
      variant="ghost"
      colorScheme="error"
      isDisabled={isDisabled}
    >
      {t('controlLayers.deleteAll')}
    </Button>
  );

@ -0,0 +1,33 @@
|
||||
import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library';
|
||||
import { IPALayerIPAdapterWrapper } from 'features/controlLayers/components/IPALayer/IPALayerIPAdapterWrapper';
|
||||
import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton';
|
||||
import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle';
|
||||
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle';
|
||||
import { memo } from 'react';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const IPALayer = memo(({ layerId }: Props) => {
|
||||
const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true });
|
||||
return (
|
||||
<Flex gap={2} bg="base.800" borderRadius="base" p="1px" px={2}>
|
||||
<Flex flexDir="column" w="full" bg="base.850" borderRadius="base">
|
||||
<Flex gap={3} alignItems="center" p={3} cursor="pointer" onDoubleClick={onToggle}>
|
||||
<LayerVisibilityToggle layerId={layerId} />
|
||||
<LayerTitle type="ip_adapter_layer" />
|
||||
<Spacer />
|
||||
<LayerDeleteButton layerId={layerId} />
|
||||
</Flex>
|
||||
{isOpen && (
|
||||
<Flex flexDir="column" gap={3} px={3} pb={3}>
|
||||
<IPALayerIPAdapterWrapper layerId={layerId} />
|
||||
</Flex>
|
||||
)}
|
||||
</Flex>
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
IPALayer.displayName = 'IPALayer';
|
@ -0,0 +1,106 @@
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { IPAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapter';
|
||||
import {
|
||||
caOrIPALayerBeginEndStepPctChanged,
|
||||
caOrIPALayerWeightChanged,
|
||||
ipaLayerCLIPVisionModelChanged,
|
||||
ipaLayerImageChanged,
|
||||
ipaLayerMethodChanged,
|
||||
ipaLayerModelChanged,
|
||||
selectIPALayerOrThrow,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { CLIPVisionModel, IPMethod } from 'features/controlLayers/util/controlAdapters';
|
||||
import type { IPALayerImageDropData } from 'features/dnd/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import type { ImageDTO, IPAdapterModelConfig, IPALayerImagePostUploadAction } from 'services/api/types';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const IPALayerIPAdapterWrapper = memo(({ layerId }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const ipAdapter = useAppSelector((s) => selectIPALayerOrThrow(s.controlLayers.present, layerId).ipAdapter);
|
||||
|
||||
const onChangeBeginEndStepPct = useCallback(
|
||||
(beginEndStepPct: [number, number]) => {
|
||||
dispatch(
|
||||
caOrIPALayerBeginEndStepPctChanged({
|
||||
layerId,
|
||||
beginEndStepPct,
|
||||
})
|
||||
);
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const onChangeWeight = useCallback(
|
||||
(weight: number) => {
|
||||
dispatch(caOrIPALayerWeightChanged({ layerId, weight }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const onChangeIPMethod = useCallback(
|
||||
(method: IPMethod) => {
|
||||
dispatch(ipaLayerMethodChanged({ layerId, method }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const onChangeModel = useCallback(
|
||||
(modelConfig: IPAdapterModelConfig) => {
|
||||
dispatch(ipaLayerModelChanged({ layerId, modelConfig }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const onChangeCLIPVisionModel = useCallback(
|
||||
(clipVisionModel: CLIPVisionModel) => {
|
||||
dispatch(ipaLayerCLIPVisionModelChanged({ layerId, clipVisionModel }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const onChangeImage = useCallback(
|
||||
(imageDTO: ImageDTO | null) => {
|
||||
dispatch(ipaLayerImageChanged({ layerId, imageDTO }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
||||
|
||||
const droppableData = useMemo<IPALayerImageDropData>(
|
||||
() => ({
|
||||
actionType: 'SET_IPA_LAYER_IMAGE',
|
||||
context: {
|
||||
layerId,
|
||||
},
|
||||
id: layerId,
|
||||
}),
|
||||
[layerId]
|
||||
);
|
||||
|
||||
const postUploadAction = useMemo<IPALayerImagePostUploadAction>(
|
||||
() => ({
|
||||
type: 'SET_IPA_LAYER_IMAGE',
|
||||
layerId,
|
||||
}),
|
||||
[layerId]
|
||||
);
|
||||
|
||||
return (
|
||||
<IPAdapter
|
||||
ipAdapter={ipAdapter}
|
||||
onChangeBeginEndStepPct={onChangeBeginEndStepPct}
|
||||
onChangeWeight={onChangeWeight}
|
||||
onChangeIPMethod={onChangeIPMethod}
|
||||
onChangeModel={onChangeModel}
|
||||
onChangeCLIPVisionModel={onChangeCLIPVisionModel}
|
||||
onChangeImage={onChangeImage}
|
||||
droppableData={droppableData}
|
||||
postUploadAction={postUploadAction}
|
||||
/>
|
||||
);
|
||||
});
|
||||
|
||||
IPALayerIPAdapterWrapper.displayName = 'IPALayerIPAdapterWrapper';
|
@ -1,47 +0,0 @@
|
||||
import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig';
|
||||
import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton';
|
||||
import { LayerTitle } from 'features/controlLayers/components/LayerTitle';
|
||||
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle';
|
||||
import { isIPAdapterLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const IPLayerListItem = memo(({ layerId }: Props) => {
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
|
||||
const layer = controlLayers.present.layers.find((l) => l.id === layerId);
|
||||
assert(isIPAdapterLayer(layer), `Layer ${layerId} not found or not an IP Adapter layer`);
|
||||
return layer.ipAdapterId;
|
||||
}),
|
||||
[layerId]
|
||||
);
|
||||
const ipAdapterId = useAppSelector(selector);
|
||||
const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true });
|
||||
return (
|
||||
<Flex gap={2} bg="base.800" borderRadius="base" p="1px" px={2}>
|
||||
<Flex flexDir="column" w="full" bg="base.850" borderRadius="base">
|
||||
<Flex gap={3} alignItems="center" p={3} cursor="pointer" onDoubleClick={onToggle}>
|
||||
<LayerVisibilityToggle layerId={layerId} />
|
||||
<LayerTitle type="ip_adapter_layer" />
|
||||
<Spacer />
|
||||
<LayerDeleteButton layerId={layerId} />
|
||||
</Flex>
|
||||
{isOpen && (
|
||||
<Flex flexDir="column" gap={3} px={3} pb={3}>
|
||||
<ControlAdapterLayerConfig id={ipAdapterId} />
|
||||
</Flex>
|
||||
)}
|
||||
</Flex>
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
IPLayerListItem.displayName = 'IPLayerListItem';
|
@ -1,7 +1,7 @@
import { IconButton } from '@invoke-ai/ui-library';
import { guidanceLayerDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
import { useAppDispatch } from 'app/store/storeHooks';
import { stopPropagation } from 'common/util/stopPropagation';
import { layerDeleted } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleBold } from 'react-icons/pi';
@ -12,7 +12,7 @@ export const LayerDeleteButton = memo(({ layerId }: Props) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const deleteLayer = useCallback(() => {
    dispatch(guidanceLayerDeleted(layerId));
    dispatch(layerDeleted(layerId));
  }, [dispatch, layerId]);
  return (
    <IconButton

@ -1,8 +1,8 @@
import { IconButton, Menu, MenuButton, MenuDivider, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { stopPropagation } from 'common/util/stopPropagation';
import { LayerMenuArrangeActions } from 'features/controlLayers/components/LayerMenuArrangeActions';
import { LayerMenuRGActions } from 'features/controlLayers/components/LayerMenuRGActions';
import { LayerMenuArrangeActions } from 'features/controlLayers/components/LayerCommon/LayerMenuArrangeActions';
import { LayerMenuRGActions } from 'features/controlLayers/components/LayerCommon/LayerMenuRGActions';
import { useLayerType } from 'features/controlLayers/hooks/layerStateHooks';
import { layerDeleted, layerReset } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';

@ -1,11 +1,11 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { guidanceLayerIPAdapterAdded } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAddIPAdapterToIPALayer } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import {
|
||||
isRegionalGuidanceLayer,
|
||||
maskLayerNegativePromptChanged,
|
||||
maskLayerPositivePromptChanged,
|
||||
rgLayerNegativePromptChanged,
|
||||
rgLayerPositivePromptChanged,
|
||||
selectControlLayersSlice,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
@ -18,6 +18,7 @@ type Props = { layerId: string };
|
||||
export const LayerMenuRGActions = memo(({ layerId }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const { t } = useTranslation();
|
||||
const [addIPAdapter, isAddIPAdapterDisabled] = useAddIPAdapterToIPALayer(layerId);
|
||||
const selectValidActions = useMemo(
|
||||
() =>
|
||||
createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
|
||||
@ -32,13 +33,10 @@ export const LayerMenuRGActions = memo(({ layerId }: Props) => {
|
||||
);
|
||||
const validActions = useAppSelector(selectValidActions);
|
||||
const addPositivePrompt = useCallback(() => {
|
||||
dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' }));
|
||||
dispatch(rgLayerPositivePromptChanged({ layerId, prompt: '' }));
|
||||
}, [dispatch, layerId]);
|
||||
const addNegativePrompt = useCallback(() => {
|
||||
dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' }));
|
||||
}, [dispatch, layerId]);
|
||||
const addIPAdapter = useCallback(() => {
|
||||
dispatch(guidanceLayerIPAdapterAdded(layerId));
|
||||
dispatch(rgLayerNegativePromptChanged({ layerId, prompt: '' }));
|
||||
}, [dispatch, layerId]);
|
||||
return (
|
||||
<>
|
||||
@ -48,7 +46,7 @@ export const LayerMenuRGActions = memo(({ layerId }: Props) => {
|
||||
<MenuItem onClick={addNegativePrompt} isDisabled={!validActions.canAddNegativePrompt} icon={<PiPlusBold />}>
|
||||
{t('controlLayers.addNegativePrompt')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={addIPAdapter} icon={<PiPlusBold />}>
|
||||
<MenuItem onClick={addIPAdapter} icon={<PiPlusBold />} isDisabled={isAddIPAdapterDisabled}>
|
||||
{t('controlLayers.addIPAdapter')}
|
||||
</MenuItem>
|
||||
</>
|
@ -2,15 +2,11 @@ import { Badge, Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { rgbColorToString } from 'features/canvas/util/colorToString';
|
||||
import { LayerDeleteButton } from 'features/controlLayers/components/LayerDeleteButton';
|
||||
import { LayerMenu } from 'features/controlLayers/components/LayerMenu';
|
||||
import { LayerTitle } from 'features/controlLayers/components/LayerTitle';
|
||||
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerVisibilityToggle';
|
||||
import { RGLayerColorPicker } from 'features/controlLayers/components/RGLayerColorPicker';
|
||||
import { RGLayerIPAdapterList } from 'features/controlLayers/components/RGLayerIPAdapterList';
|
||||
import { RGLayerNegativePrompt } from 'features/controlLayers/components/RGLayerNegativePrompt';
|
||||
import { RGLayerPositivePrompt } from 'features/controlLayers/components/RGLayerPositivePrompt';
|
||||
import RGLayerSettingsPopover from 'features/controlLayers/components/RGLayerSettingsPopover';
|
||||
import { AddPromptButtons } from 'features/controlLayers/components/AddPromptButtons';
|
||||
import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton';
|
||||
import { LayerMenu } from 'features/controlLayers/components/LayerCommon/LayerMenu';
|
||||
import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle';
|
||||
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle';
|
||||
import {
|
||||
isRegionalGuidanceLayer,
|
||||
layerSelected,
|
||||
@ -20,13 +16,17 @@ import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
import { AddPromptButtons } from './AddPromptButtons';
|
||||
import { RGLayerColorPicker } from './RGLayerColorPicker';
|
||||
import { RGLayerIPAdapterList } from './RGLayerIPAdapterList';
|
||||
import { RGLayerNegativePrompt } from './RGLayerNegativePrompt';
|
||||
import { RGLayerPositivePrompt } from './RGLayerPositivePrompt';
|
||||
import RGLayerSettingsPopover from './RGLayerSettingsPopover';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const RGLayerListItem = memo(({ layerId }: Props) => {
|
||||
export const RGLayer = memo(({ layerId }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const selector = useMemo(
|
||||
@ -38,7 +38,7 @@ export const RGLayerListItem = memo(({ layerId }: Props) => {
|
||||
color: rgbColorToString(layer.previewColor),
|
||||
hasPositivePrompt: layer.positivePrompt !== null,
|
||||
hasNegativePrompt: layer.negativePrompt !== null,
|
||||
hasIPAdapters: layer.ipAdapterIds.length > 0,
|
||||
hasIPAdapters: layer.ipAdapters.length > 0,
|
||||
isSelected: layerId === controlLayers.present.selectedLayerId,
|
||||
autoNegative: layer.autoNegative,
|
||||
};
|
||||
@ -81,4 +81,4 @@ export const RGLayerListItem = memo(({ layerId }: Props) => {
|
||||
);
|
||||
});
|
||||
|
||||
RGLayerListItem.displayName = 'RGLayerListItem';
|
||||
RGLayer.displayName = 'RGLayer';
|
@ -3,7 +3,7 @@ import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
isRegionalGuidanceLayer,
|
||||
maskLayerAutoNegativeChanged,
|
||||
rgLayerAutoNegativeChanged,
|
||||
selectControlLayersSlice,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { ChangeEvent } from 'react';
|
||||
@ -35,7 +35,7 @@ export const RGLayerAutoNegativeCheckbox = memo(({ layerId }: Props) => {
|
||||
const autoNegative = useAutoNegative(layerId);
|
||||
const onChange = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
dispatch(maskLayerAutoNegativeChanged({ layerId, autoNegative: e.target.checked ? 'invert' : 'off' }));
|
||||
dispatch(rgLayerAutoNegativeChanged({ layerId, autoNegative: e.target.checked ? 'invert' : 'off' }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
@ -6,7 +6,7 @@ import { stopPropagation } from 'common/util/stopPropagation';
|
||||
import { rgbColorToString } from 'features/canvas/util/colorToString';
|
||||
import {
|
||||
isRegionalGuidanceLayer,
|
||||
maskLayerPreviewColorChanged,
|
||||
rgLayerPreviewColorChanged,
|
||||
selectControlLayersSlice,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
@ -33,7 +33,7 @@ export const RGLayerColorPicker = memo(({ layerId }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const onColorChange = useCallback(
|
||||
(color: RgbColor) => {
|
||||
dispatch(maskLayerPreviewColorChanged({ layerId, color }));
|
||||
dispatch(rgLayerPreviewColorChanged({ layerId, color }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
@ -0,0 +1,45 @@
|
||||
import { Divider, Flex } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { RGLayerIPAdapterWrapper } from 'features/controlLayers/components/RGLayer/RGLayerIPAdapterWrapper';
|
||||
import { isRegionalGuidanceLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const RGLayerIPAdapterList = memo(({ layerId }: Props) => {
|
||||
const selectIPAdapterIds = useMemo(
|
||||
() =>
|
||||
createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
|
||||
const layer = controlLayers.present.layers.filter(isRegionalGuidanceLayer).find((l) => l.id === layerId);
|
||||
assert(layer, `Layer ${layerId} not found`);
|
||||
return layer.ipAdapters;
|
||||
}),
|
||||
[layerId]
|
||||
);
|
||||
const ipAdapters = useAppSelector(selectIPAdapterIds);
|
||||
|
||||
if (ipAdapters.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{ipAdapters.map(({ id }, index) => (
|
||||
<Flex flexDir="column" key={id}>
|
||||
{index > 0 && (
|
||||
<Flex pb={3}>
|
||||
<Divider />
|
||||
</Flex>
|
||||
)}
|
||||
<RGLayerIPAdapterWrapper layerId={layerId} ipAdapterId={id} ipAdapterNumber={index + 1} />
|
||||
</Flex>
|
||||
))}
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
RGLayerIPAdapterList.displayName = 'RGLayerIPAdapterList';
|
@ -0,0 +1,131 @@
|
||||
import { Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { IPAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapter';
|
||||
import {
|
||||
rgLayerIPAdapterBeginEndStepPctChanged,
|
||||
rgLayerIPAdapterCLIPVisionModelChanged,
|
||||
rgLayerIPAdapterDeleted,
|
||||
rgLayerIPAdapterImageChanged,
|
||||
rgLayerIPAdapterMethodChanged,
|
||||
rgLayerIPAdapterModelChanged,
|
||||
rgLayerIPAdapterWeightChanged,
|
||||
selectRGLayerIPAdapterOrThrow,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { CLIPVisionModel, IPMethod } from 'features/controlLayers/util/controlAdapters';
|
||||
import type { RGLayerIPAdapterImageDropData } from 'features/dnd/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { PiTrashSimpleBold } from 'react-icons/pi';
|
||||
import type { ImageDTO, IPAdapterModelConfig, RGLayerIPAdapterImagePostUploadAction } from 'services/api/types';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
ipAdapterId: string;
|
||||
ipAdapterNumber: number;
|
||||
};
|
||||
|
||||
export const RGLayerIPAdapterWrapper = memo(({ layerId, ipAdapterId, ipAdapterNumber }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const onDeleteIPAdapter = useCallback(() => {
|
||||
dispatch(rgLayerIPAdapterDeleted({ layerId, ipAdapterId }));
|
||||
}, [dispatch, ipAdapterId, layerId]);
|
||||
const ipAdapter = useAppSelector((s) => selectRGLayerIPAdapterOrThrow(s.controlLayers.present, layerId, ipAdapterId));
|
||||
|
||||
const onChangeBeginEndStepPct = useCallback(
|
||||
(beginEndStepPct: [number, number]) => {
|
||||
dispatch(
|
||||
rgLayerIPAdapterBeginEndStepPctChanged({
|
||||
layerId,
|
||||
ipAdapterId,
|
||||
beginEndStepPct,
|
||||
})
|
||||
);
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const onChangeWeight = useCallback(
|
||||
(weight: number) => {
|
||||
dispatch(rgLayerIPAdapterWeightChanged({ layerId, ipAdapterId, weight }));
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const onChangeIPMethod = useCallback(
|
||||
(method: IPMethod) => {
|
||||
dispatch(rgLayerIPAdapterMethodChanged({ layerId, ipAdapterId, method }));
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const onChangeModel = useCallback(
|
||||
(modelConfig: IPAdapterModelConfig) => {
|
||||
dispatch(rgLayerIPAdapterModelChanged({ layerId, ipAdapterId, modelConfig }));
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const onChangeCLIPVisionModel = useCallback(
|
||||
(clipVisionModel: CLIPVisionModel) => {
|
||||
dispatch(rgLayerIPAdapterCLIPVisionModelChanged({ layerId, ipAdapterId, clipVisionModel }));
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const onChangeImage = useCallback(
|
||||
(imageDTO: ImageDTO | null) => {
|
||||
dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO }));
|
||||
},
|
||||
[dispatch, ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const droppableData = useMemo<RGLayerIPAdapterImageDropData>(
|
||||
() => ({
|
||||
actionType: 'SET_RG_LAYER_IP_ADAPTER_IMAGE',
|
||||
context: {
|
||||
layerId,
|
||||
ipAdapterId,
|
||||
},
|
||||
id: layerId,
|
||||
}),
|
||||
[ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
const postUploadAction = useMemo<RGLayerIPAdapterImagePostUploadAction>(
|
||||
() => ({
|
||||
type: 'SET_RG_LAYER_IP_ADAPTER_IMAGE',
|
||||
layerId,
|
||||
ipAdapterId,
|
||||
}),
|
||||
[ipAdapterId, layerId]
|
||||
);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" gap={3}>
|
||||
<Flex alignItems="center" gap={3}>
|
||||
<Text fontWeight="semibold" color="base.400">{`IP Adapter ${ipAdapterNumber}`}</Text>
|
||||
<Spacer />
|
||||
<IconButton
|
||||
size="sm"
|
||||
icon={<PiTrashSimpleBold />}
|
||||
aria-label="Delete IP Adapter"
|
||||
onClick={onDeleteIPAdapter}
|
||||
variant="ghost"
|
||||
colorScheme="error"
|
||||
/>
|
||||
</Flex>
|
||||
<IPAdapter
|
||||
ipAdapter={ipAdapter}
|
||||
onChangeBeginEndStepPct={onChangeBeginEndStepPct}
|
||||
onChangeWeight={onChangeWeight}
|
||||
onChangeIPMethod={onChangeIPMethod}
|
||||
onChangeModel={onChangeModel}
|
||||
onChangeCLIPVisionModel={onChangeCLIPVisionModel}
|
||||
onChangeImage={onChangeImage}
|
||||
droppableData={droppableData}
|
||||
postUploadAction={postUploadAction}
|
||||
/>
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
RGLayerIPAdapterWrapper.displayName = 'RGLayerIPAdapterWrapper';
|
@ -1,8 +1,8 @@
|
||||
import { Box, Textarea } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayerPromptDeleteButton';
|
||||
import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton';
|
||||
import { useLayerNegativePrompt } from 'features/controlLayers/hooks/layerStateHooks';
|
||||
import { maskLayerNegativePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { rgLayerNegativePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
|
||||
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
|
||||
import { PromptPopover } from 'features/prompt/PromptPopover';
|
||||
@ -21,7 +21,7 @@ export const RGLayerNegativePrompt = memo(({ layerId }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const _onChange = useCallback(
|
||||
(v: string) => {
|
||||
dispatch(maskLayerNegativePromptChanged({ layerId, prompt: v }));
|
||||
dispatch(rgLayerNegativePromptChanged({ layerId, prompt: v }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
@ -1,8 +1,8 @@
|
||||
import { Box, Textarea } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayerPromptDeleteButton';
|
||||
import { RGLayerPromptDeleteButton } from 'features/controlLayers/components/RGLayer/RGLayerPromptDeleteButton';
|
||||
import { useLayerPositivePrompt } from 'features/controlLayers/hooks/layerStateHooks';
|
||||
import { maskLayerPositivePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { rgLayerPositivePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
|
||||
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
|
||||
import { PromptPopover } from 'features/prompt/PromptPopover';
|
||||
@ -21,7 +21,7 @@ export const RGLayerPositivePrompt = memo(({ layerId }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const _onChange = useCallback(
|
||||
(v: string) => {
|
||||
dispatch(maskLayerPositivePromptChanged({ layerId, prompt: v }));
|
||||
dispatch(rgLayerPositivePromptChanged({ layerId, prompt: v }));
|
||||
},
|
||||
[dispatch, layerId]
|
||||
);
|
@ -1,8 +1,8 @@
|
||||
import { IconButton, Tooltip } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import {
|
||||
maskLayerNegativePromptChanged,
|
||||
maskLayerPositivePromptChanged,
|
||||
rgLayerNegativePromptChanged,
|
||||
rgLayerPositivePromptChanged,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@ -18,9 +18,9 @@ export const RGLayerPromptDeleteButton = memo(({ layerId, polarity }: Props) =>
|
||||
const dispatch = useAppDispatch();
|
||||
const onClick = useCallback(() => {
|
||||
if (polarity === 'positive') {
|
||||
dispatch(maskLayerPositivePromptChanged({ layerId, prompt: null }));
|
||||
dispatch(rgLayerPositivePromptChanged({ layerId, prompt: null }));
|
||||
} else {
|
||||
dispatch(maskLayerNegativePromptChanged({ layerId, prompt: null }));
|
||||
dispatch(rgLayerNegativePromptChanged({ layerId, prompt: null }));
|
||||
}
|
||||
}, [dispatch, layerId, polarity]);
|
||||
return (
|
@ -10,7 +10,7 @@ import {
  PopoverTrigger,
} from '@invoke-ai/ui-library';
import { stopPropagation } from 'common/util/stopPropagation';
import { RGLayerAutoNegativeCheckbox } from 'features/controlLayers/components/RGLayerAutoNegativeCheckbox';
import { RGLayerAutoNegativeCheckbox } from 'features/controlLayers/components/RGLayer/RGLayerAutoNegativeCheckbox';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiGearSixBold } from 'react-icons/pi';

@ -1,80 +0,0 @@
|
||||
import { Divider, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { guidanceLayerIPAdapterDeleted } from 'app/store/middleware/listenerMiddleware/listeners/controlLayersToControlAdapterBridge';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import ControlAdapterLayerConfig from 'features/controlLayers/components/controlAdapterOverrides/ControlAdapterLayerConfig';
|
||||
import { isRegionalGuidanceLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { PiTrashSimpleBold } from 'react-icons/pi';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
type Props = {
|
||||
layerId: string;
|
||||
};
|
||||
|
||||
export const RGLayerIPAdapterList = memo(({ layerId }: Props) => {
|
||||
const selectIPAdapterIds = useMemo(
|
||||
() =>
|
||||
createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
|
||||
const layer = controlLayers.present.layers.filter(isRegionalGuidanceLayer).find((l) => l.id === layerId);
|
||||
assert(layer, `Layer ${layerId} not found`);
|
||||
return layer.ipAdapterIds;
|
||||
}),
|
||||
[layerId]
|
||||
);
|
||||
const ipAdapterIds = useAppSelector(selectIPAdapterIds);
|
||||
|
||||
if (ipAdapterIds.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{ipAdapterIds.map((id, index) => (
|
||||
<Flex flexDir="column" key={id}>
|
||||
{index > 0 && (
|
||||
<Flex pb={3}>
|
||||
<Divider />
|
||||
</Flex>
|
||||
)}
|
||||
<RGLayerIPAdapterListItem layerId={layerId} ipAdapterId={id} ipAdapterNumber={index + 1} />
|
||||
</Flex>
|
||||
))}
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
RGLayerIPAdapterList.displayName = 'RGLayerIPAdapterList';
|
||||
|
||||
type IPAdapterListItemProps = {
|
||||
layerId: string;
|
||||
ipAdapterId: string;
|
||||
ipAdapterNumber: number;
|
||||
};
|
||||
|
||||
const RGLayerIPAdapterListItem = memo(({ layerId, ipAdapterId, ipAdapterNumber }: IPAdapterListItemProps) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const onDeleteIPAdapter = useCallback(() => {
|
||||
dispatch(guidanceLayerIPAdapterDeleted({ layerId, ipAdapterId }));
|
||||
}, [dispatch, ipAdapterId, layerId]);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" gap={3}>
|
||||
<Flex alignItems="center" gap={3}>
|
||||
<Text fontWeight="semibold" color="base.400">{`IP Adapter ${ipAdapterNumber}`}</Text>
|
||||
<Spacer />
|
||||
<IconButton
|
||||
size="sm"
|
||||
icon={<PiTrashSimpleBold />}
|
||||
aria-label="Delete IP Adapter"
|
||||
onClick={onDeleteIPAdapter}
|
||||
variant="ghost"
|
||||
colorScheme="error"
|
||||
/>
|
||||
</Flex>
|
||||
<ControlAdapterLayerConfig id={ipAdapterId} />
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
RGLayerIPAdapterListItem.displayName = 'RGLayerIPAdapterListItem';
|
@ -1,237 +0,0 @@
|
||||
import type { SystemStyleObject } from '@invoke-ai/ui-library';
|
||||
import { Box, Flex, Spinner, useShiftModifier } from '@invoke-ai/ui-library';
|
||||
import { skipToken } from '@reduxjs/toolkit/query';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import IAIDndImage from 'common/components/IAIDndImage';
|
||||
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
|
||||
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
|
||||
import { useControlAdapterControlImage } from 'features/controlAdapters/hooks/useControlAdapterControlImage';
|
||||
import { useControlAdapterProcessedControlImage } from 'features/controlAdapters/hooks/useControlAdapterProcessedControlImage';
|
||||
import { useControlAdapterProcessorType } from 'features/controlAdapters/hooks/useControlAdapterProcessorType';
|
||||
import {
|
||||
controlAdapterImageChanged,
|
||||
selectControlAdaptersSlice,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
|
||||
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiArrowCounterClockwiseBold, PiFloppyDiskBold, PiRulerBold } from 'react-icons/pi';
|
||||
import {
|
||||
useAddImageToBoardMutation,
|
||||
useChangeImageIsIntermediateMutation,
|
||||
useGetImageDTOQuery,
|
||||
useRemoveImageFromBoardMutation,
|
||||
} from 'services/api/endpoints/images';
|
||||
import type { PostUploadAction } from 'services/api/types';
|
||||
|
||||
type Props = {
|
||||
id: string;
|
||||
isSmall?: boolean;
|
||||
};
|
||||
|
||||
const selectPendingControlImages = createMemoizedSelector(
|
||||
selectControlAdaptersSlice,
|
||||
(controlAdapters) => controlAdapters.pendingControlImages
|
||||
);
|
||||
|
||||
const ControlAdapterImagePreview = ({ isSmall, id }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const controlImageName = useControlAdapterControlImage(id);
|
||||
const processedControlImageName = useControlAdapterProcessedControlImage(id);
|
||||
const processorType = useControlAdapterProcessorType(id);
|
||||
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
|
||||
const isConnected = useAppSelector((s) => s.system.isConnected);
|
||||
const activeTabName = useAppSelector(activeTabNameSelector);
|
||||
const optimalDimension = useAppSelector(selectOptimalDimension);
|
||||
const pendingControlImages = useAppSelector(selectPendingControlImages);
|
||||
const shift = useShiftModifier();
|
||||
|
||||
const [isMouseOverImage, setIsMouseOverImage] = useState(false);
|
||||
|
||||
const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery(
|
||||
controlImageName ?? skipToken
|
||||
);
|
||||
|
||||
const { currentData: processedControlImage, isError: isErrorProcessedControlImage } = useGetImageDTOQuery(
|
||||
processedControlImageName ?? skipToken
|
||||
);
|
||||
|
||||
const [changeIsIntermediate] = useChangeImageIsIntermediateMutation();
|
||||
const [addToBoard] = useAddImageToBoardMutation();
|
||||
const [removeFromBoard] = useRemoveImageFromBoardMutation();
|
||||
const handleResetControlImage = useCallback(() => {
|
||||
dispatch(controlAdapterImageChanged({ id, controlImage: null }));
|
||||
}, [id, dispatch]);
|
||||
|
||||
const handleSaveControlImage = useCallback(async () => {
|
||||
if (!processedControlImage) {
|
||||
return;
|
||||
}
|
||||
|
||||
await changeIsIntermediate({
|
||||
imageDTO: processedControlImage,
|
||||
is_intermediate: false,
|
||||
}).unwrap();
|
||||
|
||||
    if (autoAddBoardId !== 'none') {
      addToBoard({
        imageDTO: processedControlImage,
        board_id: autoAddBoardId,
      });
    } else {
      removeFromBoard({ imageDTO: processedControlImage });
    }
  }, [processedControlImage, changeIsIntermediate, autoAddBoardId, addToBoard, removeFromBoard]);

  const handleSetControlImageToDimensions = useCallback(() => {
    if (!controlImage) {
      return;
    }

    if (activeTabName === 'unifiedCanvas') {
      dispatch(setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension));
    } else {
      if (shift) {
        const { width, height } = controlImage;
        dispatch(widthChanged({ width, updateAspectRatio: true }));
        dispatch(heightChanged({ height, updateAspectRatio: true }));
      } else {
        const { width, height } = calculateNewSize(
          controlImage.width / controlImage.height,
          optimalDimension * optimalDimension
        );
        dispatch(widthChanged({ width, updateAspectRatio: true }));
        dispatch(heightChanged({ height, updateAspectRatio: true }));
      }
    }
  }, [controlImage, activeTabName, dispatch, optimalDimension, shift]);

  const handleMouseEnter = useCallback(() => {
    setIsMouseOverImage(true);
  }, []);

  const handleMouseLeave = useCallback(() => {
    setIsMouseOverImage(false);
  }, []);

  const draggableData = useMemo<TypesafeDraggableData | undefined>(() => {
    if (controlImage) {
      return {
        id,
        payloadType: 'IMAGE_DTO',
        payload: { imageDTO: controlImage },
      };
    }
  }, [controlImage, id]);

  const droppableData = useMemo<TypesafeDroppableData | undefined>(
    () => ({
      id,
      actionType: 'SET_CONTROL_ADAPTER_IMAGE',
      context: { id },
    }),
    [id]
  );

  const postUploadAction = useMemo<PostUploadAction>(() => ({ type: 'SET_CONTROL_ADAPTER_IMAGE', id }), [id]);

  const shouldShowProcessedImage =
    controlImage &&
    processedControlImage &&
    !isMouseOverImage &&
    !pendingControlImages.includes(id) &&
    processorType !== 'none';

  useEffect(() => {
    if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) {
      handleResetControlImage();
    }
  }, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]);

  return (
    <Flex
      onMouseEnter={handleMouseEnter}
      onMouseLeave={handleMouseLeave}
      position="relative"
      w="full"
      h={isSmall ? 36 : 366} // magic no touch
      alignItems="center"
      justifyContent="center"
    >
      <IAIDndImage
        draggableData={draggableData}
        droppableData={droppableData}
        imageDTO={controlImage}
        isDropDisabled={shouldShowProcessedImage}
        postUploadAction={postUploadAction}
      />

      <Box
        position="absolute"
        top={0}
        insetInlineStart={0}
        w="full"
        h="full"
        opacity={shouldShowProcessedImage ? 1 : 0}
        transitionProperty="common"
        transitionDuration="normal"
        pointerEvents="none"
      >
        <IAIDndImage
          draggableData={draggableData}
          droppableData={droppableData}
          imageDTO={processedControlImage}
          isUploadDisabled={true}
        />
      </Box>

      <>
        <IAIDndImageIcon
          onClick={handleResetControlImage}
          icon={controlImage ? <PiArrowCounterClockwiseBold size={16} /> : undefined}
          tooltip={t('controlnet.resetControlImage')}
        />
        <IAIDndImageIcon
          onClick={handleSaveControlImage}
          icon={controlImage ? <PiFloppyDiskBold size={16} /> : undefined}
          tooltip={t('controlnet.saveControlImage')}
          styleOverrides={saveControlImageStyleOverrides}
        />
        <IAIDndImageIcon
          onClick={handleSetControlImageToDimensions}
          icon={controlImage ? <PiRulerBold size={16} /> : undefined}
          tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')}
          styleOverrides={setControlImageDimensionsStyleOverrides}
        />
      </>

      {pendingControlImages.includes(id) && (
        <Flex
          position="absolute"
          top={0}
          insetInlineStart={0}
          w="full"
          h="full"
          alignItems="center"
          justifyContent="center"
          opacity={0.8}
          borderRadius="base"
          bg="base.900"
        >
          <Spinner size="xl" color="base.400" />
        </Flex>
      )}
    </Flex>
  );
};

export default memo(ControlAdapterImagePreview);

const saveControlImageStyleOverrides: SystemStyleObject = { mt: 6 };
const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 12 };
@ -1,72 +0,0 @@
import { Box, Flex, Icon, IconButton } from '@invoke-ai/ui-library';
import ControlAdapterProcessorComponent from 'features/controlAdapters/components/ControlAdapterProcessorComponent';
import ControlAdapterShouldAutoConfig from 'features/controlAdapters/components/ControlAdapterShouldAutoConfig';
import ParamControlAdapterIPMethod from 'features/controlAdapters/components/parameters/ParamControlAdapterIPMethod';
import ParamControlAdapterProcessorSelect from 'features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect';
import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretUpBold } from 'react-icons/pi';
import { useToggle } from 'react-use';

import ControlAdapterImagePreview from './ControlAdapterImagePreview';
import { ParamControlAdapterBeginEnd } from './ParamControlAdapterBeginEnd';
import ParamControlAdapterControlMode from './ParamControlAdapterControlMode';
import ParamControlAdapterModel from './ParamControlAdapterModel';
import ParamControlAdapterWeight from './ParamControlAdapterWeight';

const ControlAdapterLayerConfig = (props: { id: string }) => {
  const { id } = props;
  const controlAdapterType = useControlAdapterType(id);
  const { t } = useTranslation();
  const [isExpanded, toggleIsExpanded] = useToggle(false);

  return (
    <Flex flexDir="column" gap={4} position="relative" w="full">
      <Flex gap={3} alignItems="center" w="full">
        <Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
          <ParamControlAdapterModel id={id} />{' '}
        </Box>

        {controlAdapterType !== 'ip_adapter' && (
          <IconButton
            size="sm"
            tooltip={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            aria-label={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            onClick={toggleIsExpanded}
            variant="ghost"
            icon={
              <Icon
                boxSize={4}
                as={PiCaretUpBold}
                transform={isExpanded ? 'rotate(0deg)' : 'rotate(180deg)'}
                transitionProperty="common"
                transitionDuration="normal"
              />
            }
          />
        )}
      </Flex>
      <Flex gap={4} w="full" alignItems="center">
        <Flex flexDir="column" gap={3} w="full">
          {controlAdapterType === 'ip_adapter' && <ParamControlAdapterIPMethod id={id} />}
          {controlAdapterType === 'controlnet' && <ParamControlAdapterControlMode id={id} />}
          <ParamControlAdapterWeight id={id} />
          <ParamControlAdapterBeginEnd id={id} />
        </Flex>
        <Flex alignItems="center" justifyContent="center" h={36} w={36} aspectRatio="1/1">
          <ControlAdapterImagePreview id={id} isSmall />
        </Flex>
      </Flex>
      {isExpanded && (
        <>
          <ControlAdapterShouldAutoConfig id={id} />
          <ParamControlAdapterProcessorSelect id={id} />
          <ControlAdapterProcessorComponent id={id} />
        </>
      )}
    </Flex>
  );
};

export default memo(ControlAdapterLayerConfig);
@ -1,89 +0,0 @@
import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useControlAdapterBeginEndStepPct } from 'features/controlAdapters/hooks/useControlAdapterBeginEndStepPct';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import {
  controlAdapterBeginStepPctChanged,
  controlAdapterEndStepPctChanged,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

type Props = {
  id: string;
};

const formatPct = (v: number) => `${Math.round(v * 100)}%`;

export const ParamControlAdapterBeginEnd = memo(({ id }: Props) => {
  const isEnabled = useControlAdapterIsEnabled(id);
  const stepPcts = useControlAdapterBeginEndStepPct(id);
  const dispatch = useAppDispatch();
  const { t } = useTranslation();

  const onChange = useCallback(
    (v: [number, number]) => {
      dispatch(
        controlAdapterBeginStepPctChanged({
          id,
          beginStepPct: v[0],
        })
      );
      dispatch(
        controlAdapterEndStepPctChanged({
          id,
          endStepPct: v[1],
        })
      );
    },
    [dispatch, id]
  );

  const onReset = useCallback(() => {
    dispatch(
      controlAdapterBeginStepPctChanged({
        id,
        beginStepPct: 0,
      })
    );
    dispatch(
      controlAdapterEndStepPctChanged({
        id,
        endStepPct: 1,
      })
    );
  }, [dispatch, id]);

  const value = useMemo<[number, number]>(() => [stepPcts?.beginStepPct ?? 0, stepPcts?.endStepPct ?? 1], [stepPcts]);

  if (!stepPcts) {
    return null;
  }

  return (
    <FormControl isDisabled={!isEnabled} orientation="horizontal">
      <InformationalPopover feature="controlNetBeginEnd">
        <FormLabel m={0}>{t('controlnet.beginEndStepPercentShort')}</FormLabel>
      </InformationalPopover>
      <CompositeRangeSlider
        aria-label={ariaLabel}
        value={value}
        onChange={onChange}
        onReset={onReset}
        min={0}
        max={1}
        step={0.05}
        fineStep={0.01}
        minStepsBetweenThumbs={1}
        formatValue={formatPct}
        marks
        withThumbTooltip
      />
    </FormControl>
  );
});

ParamControlAdapterBeginEnd.displayName = 'ParamControlAdapterBeginEnd';

const ariaLabel = ['Begin Step %', 'End Step %'];
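For context: the slider above stores the begin/end values as fractions of the denoising run, and they are only resolved against a concrete step count later, outside this file. A minimal sketch of that mapping, illustrative only (the helper name and the rounding choice are assumptions, not code from this commit):

// Illustrative sketch, not part of this commit.
const toStepIndices = (beginStepPct: number, endStepPct: number, totalSteps: number): [number, number] => {
  // e.g. begin 0.2 and end 0.8 over 30 steps would apply the adapter from step 6 through step 24
  const beginStep = Math.round(beginStepPct * totalSteps);
  const endStep = Math.round(endStepPct * totalSteps);
  return [beginStep, endStep];
};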
@ -1,136 +0,0 @@
|
||||
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
|
||||
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
|
||||
import { useControlAdapterCLIPVisionModel } from 'features/controlAdapters/hooks/useControlAdapterCLIPVisionModel';
|
||||
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
|
||||
import { useControlAdapterModel } from 'features/controlAdapters/hooks/useControlAdapterModel';
|
||||
import { useControlAdapterModels } from 'features/controlAdapters/hooks/useControlAdapterModels';
|
||||
import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType';
|
||||
import {
|
||||
controlAdapterCLIPVisionModelChanged,
|
||||
controlAdapterModelChanged,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import type { CLIPVisionModel } from 'features/controlAdapters/store/types';
|
||||
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import type {
|
||||
AnyModelConfig,
|
||||
ControlNetModelConfig,
|
||||
IPAdapterModelConfig,
|
||||
T2IAdapterModelConfig,
|
||||
} from 'services/api/types';
|
||||
|
||||
type ParamControlAdapterModelProps = {
|
||||
id: string;
|
||||
};
|
||||
|
||||
const selectMainModel = createMemoizedSelector(selectGenerationSlice, (generation) => generation.model);
|
||||
|
||||
const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => {
|
||||
const isEnabled = useControlAdapterIsEnabled(id);
|
||||
const controlAdapterType = useControlAdapterType(id);
|
||||
const { modelConfig } = useControlAdapterModel(id);
|
||||
const dispatch = useAppDispatch();
|
||||
const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
|
||||
const currentCLIPVisionModel = useControlAdapterCLIPVisionModel(id);
|
||||
const mainModel = useAppSelector(selectMainModel);
|
||||
const { t } = useTranslation();
|
||||
|
||||
const [modelConfigs, { isLoading }] = useControlAdapterModels(controlAdapterType);
|
||||
|
||||
const _onChange = useCallback(
|
||||
(modelConfig: ControlNetModelConfig | IPAdapterModelConfig | T2IAdapterModelConfig | null) => {
|
||||
if (!modelConfig) {
|
||||
return;
|
||||
}
|
||||
dispatch(
|
||||
controlAdapterModelChanged({
|
||||
id,
|
||||
modelConfig,
|
||||
})
|
||||
);
|
||||
},
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onCLIPVisionModelChange = useCallback<ComboboxOnChange>(
|
||||
(v) => {
|
||||
if (!v?.value) {
|
||||
return;
|
||||
}
|
||||
dispatch(controlAdapterCLIPVisionModelChanged({ id, clipVisionModel: v.value as CLIPVisionModel }));
|
||||
},
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const selectedModel = useMemo(
|
||||
() => (modelConfig && controlAdapterType ? { ...modelConfig, model_type: controlAdapterType } : null),
|
||||
[controlAdapterType, modelConfig]
|
||||
);
|
||||
|
||||
const getIsDisabled = useCallback(
|
||||
(model: AnyModelConfig): boolean => {
|
||||
const isCompatible = currentBaseModel === model.base;
|
||||
const hasMainModel = Boolean(currentBaseModel);
|
||||
return !hasMainModel || !isCompatible;
|
||||
},
|
||||
[currentBaseModel]
|
||||
);
|
||||
|
||||
const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
|
||||
modelConfigs,
|
||||
onChange: _onChange,
|
||||
selectedModel,
|
||||
getIsDisabled,
|
||||
isLoading,
|
||||
});
|
||||
|
||||
const clipVisionOptions = useMemo<ComboboxOption[]>(
|
||||
() => [
|
||||
{ label: 'ViT-H', value: 'ViT-H' },
|
||||
{ label: 'ViT-G', value: 'ViT-G' },
|
||||
],
|
||||
[]
|
||||
);
|
||||
|
||||
const clipVisionModel = useMemo(
|
||||
() => clipVisionOptions.find((o) => o.value === currentCLIPVisionModel),
|
||||
[clipVisionOptions, currentCLIPVisionModel]
|
||||
);
|
||||
|
||||
return (
|
||||
<Flex gap={4}>
|
||||
<Tooltip label={selectedModel?.description}>
|
||||
<FormControl isDisabled={!isEnabled} isInvalid={!value || mainModel?.base !== modelConfig?.base} w="full">
|
||||
<Combobox
|
||||
options={options}
|
||||
placeholder={t('controlnet.selectModel')}
|
||||
value={value}
|
||||
onChange={onChange}
|
||||
noOptionsMessage={noOptionsMessage}
|
||||
/>
|
||||
</FormControl>
|
||||
</Tooltip>
|
||||
{modelConfig?.type === 'ip_adapter' && modelConfig.format === 'checkpoint' && (
|
||||
<FormControl
|
||||
isDisabled={!isEnabled}
|
||||
isInvalid={!value || mainModel?.base !== modelConfig?.base}
|
||||
width="max-content"
|
||||
minWidth={28}
|
||||
>
|
||||
<Combobox
|
||||
options={clipVisionOptions}
|
||||
placeholder={t('controlnet.selectCLIPVisionModel')}
|
||||
value={clipVisionModel}
|
||||
onChange={onCLIPVisionModelChange}
|
||||
/>
|
||||
</FormControl>
|
||||
)}
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(ParamControlAdapterModel);
|
@ -0,0 +1,95 @@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { caLayerAdded, ipaLayerAdded, rgLayerIPAdapterAdded } from 'features/controlLayers/store/controlLayersSlice';
import {
  buildControlNet,
  buildIPAdapter,
  buildT2IAdapter,
  CONTROLNET_PROCESSORS,
  isProcessorType,
} from 'features/controlLayers/util/controlAdapters';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { useCallback, useMemo } from 'react';
import { useControlNetAndT2IAdapterModels, useIPAdapterModels } from 'services/api/hooks/modelsByType';
import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import { v4 as uuidv4 } from 'uuid';

export const useAddCALayer = () => {
  const dispatch = useAppDispatch();
  const baseModel = useAppSelector((s) => s.generation.model?.base);
  const [modelConfigs] = useControlNetAndT2IAdapterModels();
  const model: ControlNetModelConfig | T2IAdapterModelConfig | null = useMemo(() => {
    // prefer to use a model that matches the base model
    const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true));
    return compatibleModels[0] ?? modelConfigs[0] ?? null;
  }, [baseModel, modelConfigs]);
  const isDisabled = useMemo(() => !model, [model]);
  const addCALayer = useCallback(() => {
    if (!model) {
      return;
    }

    const id = uuidv4();
    const defaultPreprocessor = model.default_settings?.preprocessor;
    const processorConfig = isProcessorType(defaultPreprocessor)
      ? CONTROLNET_PROCESSORS[defaultPreprocessor].buildDefaults(baseModel)
      : null;

    const builder = model.type === 'controlnet' ? buildControlNet : buildT2IAdapter;
    const controlAdapter = builder(id, {
      model: zModelIdentifierField.parse(model),
      processorConfig,
    });

    dispatch(caLayerAdded(controlAdapter));
  }, [dispatch, model, baseModel]);

  return [addCALayer, isDisabled] as const;
};

export const useAddIPALayer = () => {
  const dispatch = useAppDispatch();
  const baseModel = useAppSelector((s) => s.generation.model?.base);
  const [modelConfigs] = useIPAdapterModels();
  const model: IPAdapterModelConfig | null = useMemo(() => {
    // prefer to use a model that matches the base model
    const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true));
    return compatibleModels[0] ?? modelConfigs[0] ?? null;
  }, [baseModel, modelConfigs]);
  const isDisabled = useMemo(() => !model, [model]);
  const addIPALayer = useCallback(() => {
    if (!model) {
      return;
    }
    const id = uuidv4();
    const ipAdapter = buildIPAdapter(id, {
      model: zModelIdentifierField.parse(model),
    });
    dispatch(ipaLayerAdded(ipAdapter));
  }, [dispatch, model]);

  return [addIPALayer, isDisabled] as const;
};

export const useAddIPAdapterToIPALayer = (layerId: string) => {
  const dispatch = useAppDispatch();
  const baseModel = useAppSelector((s) => s.generation.model?.base);
  const [modelConfigs] = useIPAdapterModels();
  const model: IPAdapterModelConfig | null = useMemo(() => {
    // prefer to use a model that matches the base model
    const compatibleModels = modelConfigs.filter((m) => (baseModel ? m.base === baseModel : true));
    return compatibleModels[0] ?? modelConfigs[0] ?? null;
  }, [baseModel, modelConfigs]);
  const isDisabled = useMemo(() => !model, [model]);
  const addIPAdapter = useCallback(() => {
    if (!model) {
      return;
    }
    const id = uuidv4();
    const ipAdapter = buildIPAdapter(id, {
      model: zModelIdentifierField.parse(model),
    });
    dispatch(rgLayerIPAdapterAdded({ layerId, ipAdapter }));
  }, [dispatch, model, layerId]);

  return [addIPAdapter, isDisabled] as const;
};
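A minimal usage sketch for the hooks above. The button component is illustrative wiring only; it assumes useAddCALayer is in scope and is not a component added by this commit:

// Illustrative sketch, not part of this commit.
import { Button } from '@invoke-ai/ui-library';
import { memo } from 'react';

const AddCALayerButton = () => {
  // isDisabled is true until a compatible ControlNet/T2I-Adapter model config is available
  const [addCALayer, isDisabled] = useAddCALayer();
  return (
    <Button onClick={addCALayer} isDisabled={isDisabled}>
      Add Control Adapter
    </Button>
  );
};

export default memo(AddCALayerButton);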
@ -9,9 +9,9 @@ import {
|
||||
$lastMouseDownPos,
|
||||
$tool,
|
||||
brushSizeChanged,
|
||||
maskLayerLineAdded,
|
||||
maskLayerPointsAdded,
|
||||
maskLayerRectAdded,
|
||||
rgLayerLineAdded,
|
||||
rgLayerPointsAdded,
|
||||
rgLayerRectAdded,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type Konva from 'konva';
|
||||
import type { KonvaEventObject } from 'konva/lib/Node';
|
||||
@ -71,7 +71,7 @@ export const useMouseEvents = () => {
|
||||
}
|
||||
if (tool === 'brush' || tool === 'eraser') {
|
||||
dispatch(
|
||||
maskLayerLineAdded({
|
||||
rgLayerLineAdded({
|
||||
layerId: selectedLayerId,
|
||||
points: [pos.x, pos.y, pos.x, pos.y],
|
||||
tool,
|
||||
@ -94,7 +94,7 @@ export const useMouseEvents = () => {
|
||||
const tool = $tool.get();
|
||||
if (pos && lastPos && selectedLayerId && tool === 'rect') {
|
||||
dispatch(
|
||||
maskLayerRectAdded({
|
||||
rgLayerRectAdded({
|
||||
layerId: selectedLayerId,
|
||||
rect: {
|
||||
x: Math.min(pos.x, lastPos.x),
|
||||
@ -128,7 +128,7 @@ export const useMouseEvents = () => {
|
||||
}
|
||||
}
|
||||
lastCursorPosRef.current = [pos.x, pos.y];
|
||||
dispatch(maskLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current }));
|
||||
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current }));
|
||||
}
|
||||
},
|
||||
[dispatch, selectedLayerId, tool]
|
||||
@ -149,7 +149,7 @@ export const useMouseEvents = () => {
|
||||
$isMouseDown.get() &&
|
||||
(tool === 'brush' || tool === 'eraser')
|
||||
) {
|
||||
dispatch(maskLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] }));
|
||||
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] }));
|
||||
}
|
||||
$isMouseOver.set(false);
|
||||
$isMouseDown.set(false);
|
||||
@ -181,7 +181,7 @@ export const useMouseEvents = () => {
|
||||
}
|
||||
if (tool === 'brush' || tool === 'eraser') {
|
||||
dispatch(
|
||||
maskLayerLineAdded({
|
||||
rgLayerLineAdded({
|
||||
layerId: selectedLayerId,
|
||||
points: [pos.x, pos.y, pos.x, pos.y],
|
||||
tool,
|
||||
|
@ -5,15 +5,12 @@ import { useMemo } from 'react';
import { useTranslation } from 'react-i18next';

const selectValidLayerCount = createSelector(selectControlLayersSlice, (controlLayers) => {
  if (!controlLayers.present.isEnabled) {
    return 0;
  }
  const validLayers = controlLayers.present.layers
    .filter(isRegionalGuidanceLayer)
    .filter((l) => l.isEnabled)
    .filter((l) => {
      const hasTextPrompt = Boolean(l.positivePrompt || l.negativePrompt);
      const hasAtLeastOneImagePrompt = l.ipAdapterIds.length > 0;
      const hasAtLeastOneImagePrompt = l.ipAdapters.length > 0;
      return hasTextPrompt || hasAtLeastOneImagePrompt;
    });

@ -3,12 +3,22 @@ import { createSlice, isAnyOf } from '@reduxjs/toolkit';
|
||||
import type { PersistConfig, RootState } from 'app/store/store';
|
||||
import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { roundToMultiple } from 'common/util/roundDownToMultiple';
|
||||
import type {
|
||||
CLIPVisionModel,
|
||||
ControlMode,
|
||||
ControlNetConfig,
|
||||
IPAdapterConfig,
|
||||
IPMethod,
|
||||
ProcessorConfig,
|
||||
T2IAdapterConfig,
|
||||
} from 'features/controlLayers/util/controlAdapters';
|
||||
import {
|
||||
controlAdapterImageChanged,
|
||||
controlAdapterProcessedImageChanged,
|
||||
isAnyControlAdapterAdded,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
buildControlAdapterProcessor,
|
||||
controlNetToT2IAdapter,
|
||||
imageDTOToImageWithDims,
|
||||
t2iAdapterToControlNet,
|
||||
} from 'features/controlLayers/util/controlAdapters';
|
||||
import { zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
|
||||
import { initialAspectRatioState } from 'features/parameters/components/ImageSize/constants';
|
||||
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
|
||||
@ -20,6 +30,7 @@ import { isEqual, partition } from 'lodash-es';
|
||||
import { atom } from 'nanostores';
|
||||
import type { RgbColor } from 'react-colorful';
|
||||
import type { UndoableOptions } from 'redux-undo';
|
||||
import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
|
||||
import { assert } from 'tsafe';
|
||||
import { v4 as uuidv4 } from 'uuid';
|
||||
|
||||
@ -41,13 +52,11 @@ export const initialControlLayersState: ControlLayersState = {
|
||||
brushSize: 100,
|
||||
layers: [],
|
||||
globalMaskLayerOpacity: 0.3, // this globally changes all mask layers' opacity
|
||||
isEnabled: true,
|
||||
positivePrompt: '',
|
||||
negativePrompt: '',
|
||||
positivePrompt2: '',
|
||||
negativePrompt2: '',
|
||||
shouldConcatPrompts: true,
|
||||
initialImage: null,
|
||||
size: {
|
||||
width: 512,
|
||||
height: 512,
|
||||
@ -72,14 +81,45 @@ const resetLayer = (layer: Layer) => {
|
||||
layer.bboxNeedsUpdate = false;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if (layer.type === 'control_adapter_layer') {
|
||||
// TODO
|
||||
}
|
||||
export const selectCALayerOrThrow = (state: ControlLayersState, layerId: string): ControlAdapterLayer => {
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
assert(isControlAdapterLayer(layer));
|
||||
return layer;
|
||||
};
|
||||
export const selectIPALayerOrThrow = (state: ControlLayersState, layerId: string): IPAdapterLayer => {
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
assert(isIPAdapterLayer(layer));
|
||||
return layer;
|
||||
};
|
||||
const selectCAOrIPALayerOrThrow = (
|
||||
state: ControlLayersState,
|
||||
layerId: string
|
||||
): ControlAdapterLayer | IPAdapterLayer => {
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
assert(isControlAdapterLayer(layer) || isIPAdapterLayer(layer));
|
||||
return layer;
|
||||
};
|
||||
const selectRGLayerOrThrow = (state: ControlLayersState, layerId: string): RegionalGuidanceLayer => {
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
assert(isRegionalGuidanceLayer(layer));
|
||||
return layer;
|
||||
};
|
||||
export const selectRGLayerIPAdapterOrThrow = (
|
||||
state: ControlLayersState,
|
||||
layerId: string,
|
||||
ipAdapterId: string
|
||||
): IPAdapterConfig => {
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
assert(isRegionalGuidanceLayer(layer));
|
||||
const ipAdapter = layer.ipAdapters.find((ipAdapter) => ipAdapter.id === ipAdapterId);
|
||||
assert(ipAdapter);
|
||||
return ipAdapter;
|
||||
};
|
||||
const getVectorMaskPreviewColor = (state: ControlLayersState): RgbColor => {
|
||||
const vmLayers = state.layers.filter(isRegionalGuidanceLayer);
|
||||
const lastColor = vmLayers[vmLayers.length - 1]?.previewColor;
|
||||
const rgLayers = state.layers.filter(isRegionalGuidanceLayer);
|
||||
const lastColor = rgLayers[rgLayers.length - 1]?.previewColor;
|
||||
return LayerColors.next(lastColor);
|
||||
};
|
||||
|
||||
@ -87,71 +127,7 @@ export const controlLayersSlice = createSlice({
|
||||
name: 'controlLayers',
|
||||
initialState: initialControlLayersState,
|
||||
reducers: {
|
||||
//#region All Layers
|
||||
regionalGuidanceLayerAdded: (state, action: PayloadAction<{ layerId: string }>) => {
|
||||
const { layerId } = action.payload;
|
||||
const layer: RegionalGuidanceLayer = {
|
||||
id: getRegionalGuidanceLayerId(layerId),
|
||||
type: 'regional_guidance_layer',
|
||||
isEnabled: true,
|
||||
bbox: null,
|
||||
bboxNeedsUpdate: false,
|
||||
maskObjects: [],
|
||||
previewColor: getVectorMaskPreviewColor(state),
|
||||
x: 0,
|
||||
y: 0,
|
||||
autoNegative: 'invert',
|
||||
needsPixelBbox: false,
|
||||
positivePrompt: '',
|
||||
negativePrompt: null,
|
||||
ipAdapterIds: [],
|
||||
isSelected: true,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
state.selectedLayerId = layer.id;
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
if (layer.id !== layerId) {
|
||||
layer.isSelected = false;
|
||||
}
|
||||
}
|
||||
return;
|
||||
},
|
||||
ipAdapterLayerAdded: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => {
|
||||
const { layerId, ipAdapterId } = action.payload;
|
||||
const layer: IPAdapterLayer = {
|
||||
id: getIPAdapterLayerId(layerId),
|
||||
type: 'ip_adapter_layer',
|
||||
isEnabled: true,
|
||||
ipAdapterId,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
return;
|
||||
},
|
||||
controlAdapterLayerAdded: (state, action: PayloadAction<{ layerId: string; controlNetId: string }>) => {
|
||||
const { layerId, controlNetId } = action.payload;
|
||||
const layer: ControlAdapterLayer = {
|
||||
id: getControlNetLayerId(layerId),
|
||||
type: 'control_adapter_layer',
|
||||
controlNetId,
|
||||
x: 0,
|
||||
y: 0,
|
||||
bbox: null,
|
||||
bboxNeedsUpdate: false,
|
||||
isEnabled: true,
|
||||
imageName: null,
|
||||
opacity: 1,
|
||||
isSelected: true,
|
||||
isFilterEnabled: true,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
state.selectedLayerId = layer.id;
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
if (layer.id !== layerId) {
|
||||
layer.isSelected = false;
|
||||
}
|
||||
}
|
||||
return;
|
||||
},
|
||||
//#region Any Layer Type
|
||||
layerSelected: (state, action: PayloadAction<string>) => {
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
if (layer.id === action.payload) {
|
||||
@ -235,144 +211,397 @@ export const controlLayersSlice = createSlice({
|
||||
state.layers = state.layers.filter((l) => l.id !== state.selectedLayerId);
|
||||
state.selectedLayerId = state.layers[0]?.id ?? null;
|
||||
},
|
||||
layerOpacityChanged: (state, action: PayloadAction<{ layerId: string; opacity: number }>) => {
|
||||
const { layerId, opacity } = action.payload;
|
||||
const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
|
||||
if (layer) {
|
||||
layer.opacity = opacity;
|
||||
}
|
||||
allLayersDeleted: (state) => {
|
||||
state.layers = [];
|
||||
state.selectedLayerId = null;
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region CA Layers
|
||||
isFilterEnabledChanged: (state, action: PayloadAction<{ layerId: string; isFilterEnabled: boolean }>) => {
|
||||
caLayerAdded: {
|
||||
reducer: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; controlAdapter: ControlNetConfig | T2IAdapterConfig }>
|
||||
) => {
|
||||
const { layerId, controlAdapter } = action.payload;
|
||||
const layer: ControlAdapterLayer = {
|
||||
id: getCALayerId(layerId),
|
||||
type: 'control_adapter_layer',
|
||||
x: 0,
|
||||
y: 0,
|
||||
bbox: null,
|
||||
bboxNeedsUpdate: false,
|
||||
isEnabled: true,
|
||||
opacity: 1,
|
||||
isSelected: true,
|
||||
isFilterEnabled: true,
|
||||
controlAdapter,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
state.selectedLayerId = layer.id;
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
if (layer.id !== layerId) {
|
||||
layer.isSelected = false;
|
||||
}
|
||||
}
|
||||
},
|
||||
prepare: (controlAdapter: ControlNetConfig | T2IAdapterConfig) => ({
|
||||
payload: { layerId: uuidv4(), controlAdapter },
|
||||
}),
|
||||
},
|
||||
caLayerImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => {
|
||||
const { layerId, imageDTO } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.bbox = null;
|
||||
layer.bboxNeedsUpdate = true;
|
||||
layer.isEnabled = true;
|
||||
if (imageDTO) {
|
||||
const newImage = imageDTOToImageWithDims(imageDTO);
|
||||
if (isEqual(newImage, layer.controlAdapter.image)) {
|
||||
return;
|
||||
}
|
||||
layer.controlAdapter.image = newImage;
|
||||
layer.controlAdapter.processedImage = null;
|
||||
} else {
|
||||
layer.controlAdapter.image = null;
|
||||
layer.controlAdapter.processedImage = null;
|
||||
}
|
||||
},
|
||||
caLayerProcessedImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => {
|
||||
const { layerId, imageDTO } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.bbox = null;
|
||||
layer.bboxNeedsUpdate = true;
|
||||
layer.isEnabled = true;
|
||||
layer.controlAdapter.processedImage = imageDTO ? imageDTOToImageWithDims(imageDTO) : null;
|
||||
},
|
||||
caLayerModelChanged: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
layerId: string;
|
||||
modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | null;
|
||||
}>
|
||||
) => {
|
||||
const { layerId, modelConfig } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
if (!modelConfig) {
|
||||
layer.controlAdapter.model = null;
|
||||
return;
|
||||
}
|
||||
layer.controlAdapter.model = zModelIdentifierField.parse(modelConfig);
|
||||
|
||||
// We may need to convert the CA to match the model
|
||||
if (layer.controlAdapter.type === 't2i_adapter' && layer.controlAdapter.model.type === 'controlnet') {
|
||||
layer.controlAdapter = t2iAdapterToControlNet(layer.controlAdapter);
|
||||
} else if (layer.controlAdapter.type === 'controlnet' && layer.controlAdapter.model.type === 't2i_adapter') {
|
||||
layer.controlAdapter = controlNetToT2IAdapter(layer.controlAdapter);
|
||||
}
|
||||
|
||||
const candidateProcessorConfig = buildControlAdapterProcessor(modelConfig);
|
||||
if (candidateProcessorConfig?.type !== layer.controlAdapter.processorConfig?.type) {
|
||||
// The processor has changed. For example, the previous model was a Canny model and the new model is a Depth
|
||||
// model. We need to use the new processor.
|
||||
layer.controlAdapter.processedImage = null;
|
||||
layer.controlAdapter.processorConfig = candidateProcessorConfig;
|
||||
}
|
||||
},
|
||||
caLayerControlModeChanged: (state, action: PayloadAction<{ layerId: string; controlMode: ControlMode }>) => {
|
||||
const { layerId, controlMode } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
assert(layer.controlAdapter.type === 'controlnet');
|
||||
layer.controlAdapter.controlMode = controlMode;
|
||||
},
|
||||
caLayerProcessorConfigChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; processorConfig: ProcessorConfig | null }>
|
||||
) => {
|
||||
const { layerId, processorConfig } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.controlAdapter.processorConfig = processorConfig;
|
||||
if (!processorConfig) {
|
||||
layer.controlAdapter.processedImage = null;
|
||||
}
|
||||
},
|
||||
caLayerIsFilterEnabledChanged: (state, action: PayloadAction<{ layerId: string; isFilterEnabled: boolean }>) => {
|
||||
const { layerId, isFilterEnabled } = action.payload;
|
||||
const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
|
||||
if (layer) {
|
||||
layer.isFilterEnabled = isFilterEnabled;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.isFilterEnabled = isFilterEnabled;
|
||||
},
|
||||
caLayerOpacityChanged: (state, action: PayloadAction<{ layerId: string; opacity: number }>) => {
|
||||
const { layerId, opacity } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.opacity = opacity;
|
||||
},
|
||||
caLayerIsProcessingImageChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; isProcessingImage: boolean }>
|
||||
) => {
|
||||
const { layerId, isProcessingImage } = action.payload;
|
||||
const layer = selectCALayerOrThrow(state, layerId);
|
||||
layer.controlAdapter.isProcessingImage = isProcessingImage;
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region IP Adapter Layers
|
||||
ipaLayerAdded: {
|
||||
reducer: (state, action: PayloadAction<{ layerId: string; ipAdapter: IPAdapterConfig }>) => {
|
||||
const { layerId, ipAdapter } = action.payload;
|
||||
const layer: IPAdapterLayer = {
|
||||
id: getIPALayerId(layerId),
|
||||
type: 'ip_adapter_layer',
|
||||
isEnabled: true,
|
||||
ipAdapter,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
},
|
||||
prepare: (ipAdapter: IPAdapterConfig) => ({ payload: { layerId: uuidv4(), ipAdapter } }),
|
||||
},
|
||||
ipaLayerImageChanged: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => {
|
||||
const { layerId, imageDTO } = action.payload;
|
||||
const layer = selectIPALayerOrThrow(state, layerId);
|
||||
layer.ipAdapter.image = imageDTO ? imageDTOToImageWithDims(imageDTO) : null;
|
||||
},
|
||||
ipaLayerMethodChanged: (state, action: PayloadAction<{ layerId: string; method: IPMethod }>) => {
|
||||
const { layerId, method } = action.payload;
|
||||
const layer = selectIPALayerOrThrow(state, layerId);
|
||||
layer.ipAdapter.method = method;
|
||||
},
|
||||
ipaLayerModelChanged: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
layerId: string;
|
||||
modelConfig: IPAdapterModelConfig | null;
|
||||
}>
|
||||
) => {
|
||||
const { layerId, modelConfig } = action.payload;
|
||||
const layer = selectIPALayerOrThrow(state, layerId);
|
||||
if (!modelConfig) {
|
||||
layer.ipAdapter.model = null;
|
||||
return;
|
||||
}
|
||||
layer.ipAdapter.model = zModelIdentifierField.parse(modelConfig);
|
||||
},
|
||||
ipaLayerCLIPVisionModelChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; clipVisionModel: CLIPVisionModel }>
|
||||
) => {
|
||||
const { layerId, clipVisionModel } = action.payload;
|
||||
const layer = selectIPALayerOrThrow(state, layerId);
|
||||
layer.ipAdapter.clipVisionModel = clipVisionModel;
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region CA or IPA Layers
|
||||
caOrIPALayerWeightChanged: (state, action: PayloadAction<{ layerId: string; weight: number }>) => {
|
||||
const { layerId, weight } = action.payload;
|
||||
const layer = selectCAOrIPALayerOrThrow(state, layerId);
|
||||
if (layer.type === 'control_adapter_layer') {
|
||||
layer.controlAdapter.weight = weight;
|
||||
} else {
|
||||
layer.ipAdapter.weight = weight;
|
||||
}
|
||||
},
|
||||
caOrIPALayerBeginEndStepPctChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; beginEndStepPct: [number, number] }>
|
||||
) => {
|
||||
const { layerId, beginEndStepPct } = action.payload;
|
||||
const layer = selectCAOrIPALayerOrThrow(state, layerId);
|
||||
if (layer.type === 'control_adapter_layer') {
|
||||
layer.controlAdapter.beginEndStepPct = beginEndStepPct;
|
||||
} else {
|
||||
layer.ipAdapter.beginEndStepPct = beginEndStepPct;
|
||||
}
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region Mask Layers
|
||||
maskLayerPositivePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => {
|
||||
const { layerId, prompt } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.positivePrompt = prompt;
|
||||
}
|
||||
},
|
||||
maskLayerNegativePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => {
|
||||
const { layerId, prompt } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.negativePrompt = prompt;
|
||||
}
|
||||
},
|
||||
maskLayerIPAdapterAdded: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => {
|
||||
const { layerId, ipAdapterId } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.ipAdapterIds.push(ipAdapterId);
|
||||
}
|
||||
},
|
||||
maskLayerIPAdapterDeleted: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => {
|
||||
const { layerId, ipAdapterId } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.ipAdapterIds = layer.ipAdapterIds.filter((id) => id !== ipAdapterId);
|
||||
}
|
||||
},
|
||||
maskLayerPreviewColorChanged: (state, action: PayloadAction<{ layerId: string; color: RgbColor }>) => {
|
||||
const { layerId, color } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.previewColor = color;
|
||||
}
|
||||
},
|
||||
maskLayerLineAdded: {
|
||||
reducer: (
|
||||
state,
|
||||
action: PayloadAction<
|
||||
{ layerId: string; points: [number, number, number, number]; tool: DrawingTool },
|
||||
string,
|
||||
{ uuid: string }
|
||||
>
|
||||
) => {
|
||||
const { layerId, points, tool } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
const lineId = getRegionalGuidanceLayerLineId(layer.id, action.meta.uuid);
|
||||
layer.maskObjects.push({
|
||||
type: 'vector_mask_line',
|
||||
tool: tool,
|
||||
id: lineId,
|
||||
// Points must be offset by the layer's x and y coordinates
|
||||
// TODO: Handle this in the event listener?
|
||||
points: [points[0] - layer.x, points[1] - layer.y, points[2] - layer.x, points[3] - layer.y],
|
||||
strokeWidth: state.brushSize,
|
||||
});
|
||||
layer.bboxNeedsUpdate = true;
|
||||
if (!layer.needsPixelBbox && tool === 'eraser') {
|
||||
layer.needsPixelBbox = true;
|
||||
//#region RG Layers
|
||||
rgLayerAdded: {
|
||||
reducer: (state, action: PayloadAction<{ layerId: string }>) => {
|
||||
const { layerId } = action.payload;
|
||||
const layer: RegionalGuidanceLayer = {
|
||||
id: getRGLayerId(layerId),
|
||||
type: 'regional_guidance_layer',
|
||||
isEnabled: true,
|
||||
bbox: null,
|
||||
bboxNeedsUpdate: false,
|
||||
maskObjects: [],
|
||||
previewColor: getVectorMaskPreviewColor(state),
|
||||
x: 0,
|
||||
y: 0,
|
||||
autoNegative: 'invert',
|
||||
needsPixelBbox: false,
|
||||
positivePrompt: '',
|
||||
negativePrompt: null,
|
||||
ipAdapters: [],
|
||||
isSelected: true,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
state.selectedLayerId = layer.id;
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
if (layer.id !== layerId) {
|
||||
layer.isSelected = false;
|
||||
}
|
||||
}
|
||||
},
|
||||
prepare: () => ({ payload: { layerId: uuidv4() } }),
|
||||
},
|
||||
rgLayerPositivePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => {
|
||||
const { layerId, prompt } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.positivePrompt = prompt;
|
||||
},
|
||||
rgLayerNegativePromptChanged: (state, action: PayloadAction<{ layerId: string; prompt: string | null }>) => {
|
||||
const { layerId, prompt } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.negativePrompt = prompt;
|
||||
},
|
||||
rgLayerPreviewColorChanged: (state, action: PayloadAction<{ layerId: string; color: RgbColor }>) => {
|
||||
const { layerId, color } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.previewColor = color;
|
||||
},
|
||||
rgLayerLineAdded: {
|
||||
reducer: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
layerId: string;
|
||||
points: [number, number, number, number];
|
||||
tool: DrawingTool;
|
||||
lineUuid: string;
|
||||
}>
|
||||
) => {
|
||||
const { layerId, points, tool, lineUuid } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
const lineId = getRGLayerLineId(layer.id, lineUuid);
|
||||
layer.maskObjects.push({
|
||||
type: 'vector_mask_line',
|
||||
tool: tool,
|
||||
id: lineId,
|
||||
// Points must be offset by the layer's x and y coordinates
|
||||
// TODO: Handle this in the event listener?
|
||||
points: [points[0] - layer.x, points[1] - layer.y, points[2] - layer.x, points[3] - layer.y],
|
||||
strokeWidth: state.brushSize,
|
||||
});
|
||||
layer.bboxNeedsUpdate = true;
|
||||
if (!layer.needsPixelBbox && tool === 'eraser') {
|
||||
layer.needsPixelBbox = true;
|
||||
}
|
||||
},
|
||||
prepare: (payload: { layerId: string; points: [number, number, number, number]; tool: DrawingTool }) => ({
|
||||
payload,
|
||||
meta: { uuid: uuidv4() },
|
||||
payload: { ...payload, lineUuid: uuidv4() },
|
||||
}),
|
||||
},
|
||||
maskLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => {
|
||||
rgLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => {
|
||||
const { layerId, point } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
const lastLine = layer.maskObjects.findLast(isLine);
|
||||
if (!lastLine) {
|
||||
return;
|
||||
}
|
||||
// Points must be offset by the layer's x and y coordinates
|
||||
// TODO: Handle this in the event listener
|
||||
lastLine.points.push(point[0] - layer.x, point[1] - layer.y);
|
||||
layer.bboxNeedsUpdate = true;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
const lastLine = layer.maskObjects.findLast(isLine);
|
||||
if (!lastLine) {
|
||||
return;
|
||||
}
|
||||
// Points must be offset by the layer's x and y coordinates
|
||||
// TODO: Handle this in the event listener
|
||||
lastLine.points.push(point[0] - layer.x, point[1] - layer.y);
|
||||
layer.bboxNeedsUpdate = true;
|
||||
},
|
||||
maskLayerRectAdded: {
|
||||
reducer: (state, action: PayloadAction<{ layerId: string; rect: IRect }, string, { uuid: string }>) => {
|
||||
const { layerId, rect } = action.payload;
|
||||
rgLayerRectAdded: {
|
||||
reducer: (state, action: PayloadAction<{ layerId: string; rect: IRect; rectUuid: string }>) => {
|
||||
const { layerId, rect, rectUuid } = action.payload;
|
||||
if (rect.height === 0 || rect.width === 0) {
|
||||
// Ignore zero-area rectangles
|
||||
return;
|
||||
}
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
const id = getMaskedGuidnaceLayerRectId(layer.id, action.meta.uuid);
|
||||
layer.maskObjects.push({
|
||||
type: 'vector_mask_rect',
|
||||
id,
|
||||
x: rect.x - layer.x,
|
||||
y: rect.y - layer.y,
|
||||
width: rect.width,
|
||||
height: rect.height,
|
||||
});
|
||||
layer.bboxNeedsUpdate = true;
|
||||
}
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
const id = getRGLayerRectId(layer.id, rectUuid);
|
||||
layer.maskObjects.push({
|
||||
type: 'vector_mask_rect',
|
||||
id,
|
||||
x: rect.x - layer.x,
|
||||
y: rect.y - layer.y,
|
||||
width: rect.width,
|
||||
height: rect.height,
|
||||
});
|
||||
layer.bboxNeedsUpdate = true;
|
||||
},
|
||||
prepare: (payload: { layerId: string; rect: IRect }) => ({ payload, meta: { uuid: uuidv4() } }),
|
||||
prepare: (payload: { layerId: string; rect: IRect }) => ({ payload: { ...payload, rectUuid: uuidv4() } }),
|
||||
},
|
||||
maskLayerAutoNegativeChanged: (
|
||||
rgLayerAutoNegativeChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; autoNegative: ParameterAutoNegative }>
|
||||
) => {
|
||||
const { layerId, autoNegative } = action.payload;
|
||||
const layer = state.layers.find((l) => l.id === layerId);
|
||||
if (layer?.type === 'regional_guidance_layer') {
|
||||
layer.autoNegative = autoNegative;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.autoNegative = autoNegative;
|
||||
},
|
||||
rgLayerIPAdapterAdded: (state, action: PayloadAction<{ layerId: string; ipAdapter: IPAdapterConfig }>) => {
|
||||
const { layerId, ipAdapter } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.ipAdapters.push(ipAdapter);
|
||||
},
|
||||
rgLayerIPAdapterDeleted: (state, action: PayloadAction<{ layerId: string; ipAdapterId: string }>) => {
|
||||
const { layerId, ipAdapterId } = action.payload;
|
||||
const layer = selectRGLayerOrThrow(state, layerId);
|
||||
layer.ipAdapters = layer.ipAdapters.filter((ipAdapter) => ipAdapter.id !== ipAdapterId);
|
||||
},
|
||||
rgLayerIPAdapterImageChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; ipAdapterId: string; imageDTO: ImageDTO | null }>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, imageDTO } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
ipAdapter.image = imageDTO ? imageDTOToImageWithDims(imageDTO) : null;
|
||||
},
|
||||
rgLayerIPAdapterWeightChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; ipAdapterId: string; weight: number }>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, weight } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
ipAdapter.weight = weight;
|
||||
},
|
||||
rgLayerIPAdapterBeginEndStepPctChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; ipAdapterId: string; beginEndStepPct: [number, number] }>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, beginEndStepPct } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
ipAdapter.beginEndStepPct = beginEndStepPct;
|
||||
},
|
||||
rgLayerIPAdapterMethodChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; ipAdapterId: string; method: IPMethod }>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, method } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
ipAdapter.method = method;
|
||||
},
|
||||
rgLayerIPAdapterModelChanged: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
layerId: string;
|
||||
ipAdapterId: string;
|
||||
modelConfig: IPAdapterModelConfig | null;
|
||||
}>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, modelConfig } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
if (!modelConfig) {
|
||||
ipAdapter.model = null;
|
||||
return;
|
||||
}
|
||||
ipAdapter.model = zModelIdentifierField.parse(modelConfig);
|
||||
},
|
||||
rgLayerIPAdapterCLIPVisionModelChanged: (
|
||||
state,
|
||||
action: PayloadAction<{ layerId: string; ipAdapterId: string; clipVisionModel: CLIPVisionModel }>
|
||||
) => {
|
||||
const { layerId, ipAdapterId, clipVisionModel } = action.payload;
|
||||
const ipAdapter = selectRGLayerIPAdapterOrThrow(state, layerId, ipAdapterId);
|
||||
ipAdapter.clipVisionModel = clipVisionModel;
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region Base Layer
|
||||
//#region Globals
|
||||
positivePromptChanged: (state, action: PayloadAction<string>) => {
|
||||
state.positivePrompt = action.payload;
|
||||
},
|
||||
@ -409,18 +638,12 @@ export const controlLayersSlice = createSlice({
|
||||
aspectRatioChanged: (state, action: PayloadAction<AspectRatioState>) => {
|
||||
state.size.aspectRatio = action.payload;
|
||||
},
|
||||
//#endregion
|
||||
|
||||
//#region General
|
||||
brushSizeChanged: (state, action: PayloadAction<number>) => {
|
||||
state.brushSize = Math.round(action.payload);
|
||||
},
|
||||
globalMaskLayerOpacityChanged: (state, action: PayloadAction<number>) => {
|
||||
state.globalMaskLayerOpacity = action.payload;
|
||||
},
|
||||
isEnabledChanged: (state, action: PayloadAction<boolean>) => {
|
||||
state.isEnabled = action.payload;
|
||||
},
|
||||
undo: (state) => {
|
||||
// Invalidate the bbox for all layers to prevent stale bboxes
|
||||
for (const layer of state.layers.filter(isRenderableLayer)) {
|
||||
@ -451,36 +674,14 @@ export const controlLayersSlice = createSlice({
|
||||
state.size.height = height;
|
||||
});
|
||||
|
||||
builder.addCase(controlAdapterImageChanged, (state, action) => {
|
||||
const { id, controlImage } = action.payload;
|
||||
const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.controlNetId === id);
|
||||
if (layer) {
|
||||
layer.bbox = null;
|
||||
layer.bboxNeedsUpdate = true;
|
||||
layer.isEnabled = true;
|
||||
layer.imageName = controlImage?.image_name ?? null;
|
||||
}
|
||||
});
|
||||
|
||||
builder.addCase(controlAdapterProcessedImageChanged, (state, action) => {
|
||||
const { id, processedControlImage } = action.payload;
|
||||
const layer = state.layers.filter(isControlAdapterLayer).find((l) => l.controlNetId === id);
|
||||
if (layer) {
|
||||
layer.bbox = null;
|
||||
layer.bboxNeedsUpdate = true;
|
||||
layer.isEnabled = true;
|
||||
layer.imageName = processedControlImage?.image_name ?? null;
|
||||
}
|
||||
});
|
||||
|
||||
// TODO: This is a temp fix to reduce issues with T2I adapter having a different downscaling
|
||||
// factor than the UNet. Hopefully we get an upstream fix in diffusers.
|
||||
builder.addMatcher(isAnyControlAdapterAdded, (state, action) => {
|
||||
if (action.payload.type === 't2i_adapter') {
|
||||
state.size.width = roundToMultiple(state.size.width, 64);
|
||||
state.size.height = roundToMultiple(state.size.height, 64);
|
||||
}
|
||||
});
|
||||
// // TODO: This is a temp fix to reduce issues with T2I adapter having a different downscaling
|
||||
// // factor than the UNet. Hopefully we get an upstream fix in diffusers.
|
||||
// builder.addMatcher(isAnyControlAdapterAdded, (state, action) => {
|
||||
// if (action.payload.type === 't2i_adapter') {
|
||||
// state.size.width = roundToMultiple(state.size.width, 64);
|
||||
// state.size.height = roundToMultiple(state.size.height, 64);
|
||||
// }
|
||||
// });
|
||||
},
|
||||
});
|
||||
|
||||
@ -516,36 +717,57 @@ class LayerColors {
|
||||
}
|
||||
|
||||
export const {
|
||||
// All layer actions
|
||||
layerDeleted,
|
||||
layerMovedBackward,
|
||||
layerMovedForward,
|
||||
layerMovedToBack,
|
||||
layerMovedToFront,
|
||||
layerReset,
|
||||
// Any Layer Type
|
||||
layerSelected,
|
||||
layerVisibilityToggled,
|
||||
layerTranslated,
|
||||
layerBboxChanged,
|
||||
layerVisibilityToggled,
|
||||
layerReset,
|
||||
layerDeleted,
|
||||
layerMovedForward,
|
||||
layerMovedToFront,
|
||||
layerMovedBackward,
|
||||
layerMovedToBack,
|
||||
selectedLayerReset,
|
||||
selectedLayerDeleted,
|
||||
regionalGuidanceLayerAdded,
|
||||
ipAdapterLayerAdded,
|
||||
controlAdapterLayerAdded,
|
||||
layerOpacityChanged,
|
||||
// CA layer actions
|
||||
isFilterEnabledChanged,
|
||||
// Mask layer actions
|
||||
maskLayerLineAdded,
|
||||
maskLayerPointsAdded,
|
||||
maskLayerRectAdded,
|
||||
maskLayerNegativePromptChanged,
|
||||
maskLayerPositivePromptChanged,
|
||||
maskLayerIPAdapterAdded,
|
||||
maskLayerIPAdapterDeleted,
|
||||
maskLayerAutoNegativeChanged,
|
||||
maskLayerPreviewColorChanged,
|
||||
// Base layer actions
|
||||
allLayersDeleted,
|
||||
// CA Layers
|
||||
caLayerAdded,
|
||||
caLayerImageChanged,
|
||||
caLayerProcessedImageChanged,
|
||||
caLayerModelChanged,
|
||||
caLayerControlModeChanged,
|
||||
caLayerProcessorConfigChanged,
|
||||
caLayerIsFilterEnabledChanged,
|
||||
caLayerOpacityChanged,
|
||||
caLayerIsProcessingImageChanged,
|
||||
// IPA Layers
|
||||
ipaLayerAdded,
|
||||
ipaLayerImageChanged,
|
||||
ipaLayerMethodChanged,
|
||||
ipaLayerModelChanged,
|
||||
ipaLayerCLIPVisionModelChanged,
|
||||
// CA or IPA Layers
|
||||
caOrIPALayerWeightChanged,
|
||||
caOrIPALayerBeginEndStepPctChanged,
|
||||
// RG Layers
|
||||
rgLayerAdded,
|
||||
rgLayerPositivePromptChanged,
|
||||
rgLayerNegativePromptChanged,
|
||||
rgLayerPreviewColorChanged,
|
||||
rgLayerLineAdded,
|
||||
rgLayerPointsAdded,
|
||||
rgLayerRectAdded,
|
||||
rgLayerAutoNegativeChanged,
|
||||
rgLayerIPAdapterAdded,
|
||||
rgLayerIPAdapterDeleted,
|
||||
rgLayerIPAdapterImageChanged,
|
||||
rgLayerIPAdapterWeightChanged,
|
||||
rgLayerIPAdapterBeginEndStepPctChanged,
|
||||
rgLayerIPAdapterMethodChanged,
|
||||
rgLayerIPAdapterModelChanged,
|
||||
rgLayerIPAdapterCLIPVisionModelChanged,
|
||||
// Globals
|
||||
positivePromptChanged,
|
||||
negativePromptChanged,
|
||||
positivePrompt2Changed,
|
||||
@ -554,27 +776,12 @@ export const {
|
||||
widthChanged,
|
||||
heightChanged,
|
||||
aspectRatioChanged,
|
||||
// General actions
|
||||
brushSizeChanged,
|
||||
globalMaskLayerOpacityChanged,
|
||||
undo,
|
||||
redo,
|
||||
} = controlLayersSlice.actions;
|
||||
|
||||
export const selectAllControlAdapterIds = (controlLayers: ControlLayersState) =>
|
||||
controlLayers.layers.flatMap((l) => {
|
||||
if (l.type === 'control_adapter_layer') {
|
||||
return [l.controlNetId];
|
||||
}
|
||||
if (l.type === 'ip_adapter_layer') {
|
||||
return [l.ipAdapterId];
|
||||
}
|
||||
if (l.type === 'regional_guidance_layer') {
|
||||
return l.ipAdapterIds;
|
||||
}
|
||||
return [];
|
||||
});

export const selectControlLayersSlice = (state: RootState) => state.controlLayers;

/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
@ -600,24 +807,23 @@ export const BACKGROUND_RECT_ID = 'background_layer.rect';
export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message';

// Names (aka classes) for Konva layers and objects
export const CONTROLNET_LAYER_NAME = 'control_adapter_layer';
export const CONTROLNET_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const regional_guidance_layer_NAME = 'regional_guidance_layer';
export const regional_guidance_layer_LINE_NAME = 'regional_guidance_layer.line';
export const regional_guidance_layer_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const regional_guidance_layer_RECT_NAME = 'regional_guidance_layer.rect';
export const CA_LAYER_NAME = 'control_adapter_layer';
export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const RG_LAYER_NAME = 'regional_guidance_layer';
export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line';
export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect';
export const LAYER_BBOX_NAME = 'layer.bbox';

// Getters for non-singleton layer and object IDs
const getRegionalGuidanceLayerId = (layerId: string) => `${regional_guidance_layer_NAME}_${layerId}`;
const getRegionalGuidanceLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
const getMaskedGuidnaceLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRegionalGuidanceLayerObjectGroupId = (layerId: string, groupId: string) =>
`${layerId}.objectGroup_${groupId}`;
const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
const getControlNetLayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getControlNetLayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
const getIPAdapterLayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;
const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;

export const controlLayersPersistConfig: PersistConfig<ControlLayersState> = {
name: controlLayersSlice.name,
@ -631,9 +837,13 @@ const undoableGroupByMatcher = isAnyOf(
layerTranslated,
brushSizeChanged,
globalMaskLayerOpacityChanged,
maskLayerPositivePromptChanged,
maskLayerNegativePromptChanged,
maskLayerPreviewColorChanged
positivePromptChanged,
negativePromptChanged,
positivePrompt2Changed,
negativePrompt2Changed,
rgLayerPositivePromptChanged,
rgLayerNegativePromptChanged,
rgLayerPreviewColorChanged
);

// These are used to group actions into logical lines below (hate typos)
@ -645,13 +855,13 @@ export const controlLayersUndoableConfig: UndoableOptions<ControlLayersState, Un
undoType: controlLayersSlice.actions.undo.type,
redoType: controlLayersSlice.actions.redo.type,
groupBy: (action, state, history) => {
// Lines are started with `maskLayerLineAdded` and may have any number of subsequent `maskLayerPointsAdded` events.
// Lines are started with `rgLayerLineAdded` and may have any number of subsequent `rgLayerPointsAdded` events.
// We can use a double-buffer-esque trick to group each "logical" line as a single undoable action, without grouping
// separate logical lines as a single undo action.
if (maskLayerLineAdded.match(action)) {
if (rgLayerLineAdded.match(action)) {
return history.group === LINE_1 ? LINE_2 : LINE_1;
}
if (maskLayerPointsAdded.match(action)) {
if (rgLayerPointsAdded.match(action)) {
if (history.group === LINE_1 || history.group === LINE_2) {
return history.group;
}
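An illustrative sketch (not part of this commit) of why the LINE_1/LINE_2 "double buffer" works: redux-undo merges consecutive actions whose groupBy result is the same key, so flipping the key on every rgLayerLineAdded starts a new undo step, while rgLayerPointsAdded keeps extending the current one. The key values below are hypothetical stand-ins for the real constants defined in this file:

// Illustrative only: simulate the grouping a stroke's actions receive.
type SketchAction = { type: 'lineAdded' } | { type: 'pointsAdded' };

const LINE_A = 'LINE_1'; // hypothetical keys
const LINE_B = 'LINE_2';

const groupKeys = (actions: SketchAction[]): string[] => {
  let current: string = LINE_B;
  return actions.map((action) => {
    if (action.type === 'lineAdded') {
      current = current === LINE_A ? LINE_B : LINE_A; // flip -> new undo step
    }
    return current; // pointsAdded keeps the current key -> merged into the step
  });
};

// groupKeys([{ type: 'lineAdded' }, { type: 'pointsAdded' }, { type: 'lineAdded' }])
// -> ['LINE_1', 'LINE_1', 'LINE_2']: two strokes, two undo steps.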
@ -1,3 +1,4 @@
import type { ControlNetConfig, IPAdapterConfig, T2IAdapterConfig } from 'features/controlLayers/util/controlAdapters';
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
import type {
ParameterAutoNegative,
@ -47,15 +48,14 @@ type RenderableLayerBase = LayerBase & {

export type ControlAdapterLayer = RenderableLayerBase & {
type: 'control_adapter_layer'; // technically, also t2i adapter layer
controlNetId: string;
imageName: string | null;
opacity: number;
isFilterEnabled: boolean;
controlAdapter: ControlNetConfig | T2IAdapterConfig;
};

export type IPAdapterLayer = LayerBase & {
type: 'ip_adapter_layer'; // technically, also t2i adapter layer
ipAdapterId: string;
type: 'ip_adapter_layer';
ipAdapter: IPAdapterConfig;
};

export type RegionalGuidanceLayer = RenderableLayerBase & {
@ -63,7 +63,7 @@ export type RegionalGuidanceLayer = RenderableLayerBase & {
maskObjects: (VectorMaskLine | VectorMaskRect)[];
positivePrompt: ParameterPositivePrompt | null;
negativePrompt: ParameterNegativePrompt | null; // Up to one text prompt per mask
ipAdapterIds: string[]; // Any number of image prompts
ipAdapters: IPAdapterConfig[]; // Any number of image prompts
previewColor: RgbColor;
autoNegative: ParameterAutoNegative;
needsPixelBbox: boolean; // Needs the slower pixel-based bbox calculation - set to true when there is an eraser object
@ -77,13 +77,11 @@ export type ControlLayersState = {
layers: Layer[];
brushSize: number;
globalMaskLayerOpacity: number;
isEnabled: boolean;
positivePrompt: ParameterPositivePrompt;
negativePrompt: ParameterNegativePrompt;
positivePrompt2: ParameterPositiveStylePromptSDXL;
negativePrompt2: ParameterNegativeStylePromptSDXL;
shouldConcatPrompts: boolean;
initialImage: string | null;
size: {
width: ParameterWidth;
height: ParameterHeight;

@ -1,6 +1,6 @@
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { imageDataToDataURL } from 'features/canvas/util/blobToDataURL';
import { regional_guidance_layer_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice';
import { RG_LAYER_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice';
import Konva from 'konva';
import type { Layer as KonvaLayerType } from 'konva/lib/Layer';
import type { IRect } from 'konva/lib/types';
@ -81,7 +81,7 @@ export const getLayerBboxPixels = (layer: KonvaLayerType, preview: boolean = fal
offscreenStage.add(layerClone);

for (const child of layerClone.getChildren()) {
if (child.name() === regional_guidance_layer_OBJECT_GROUP_NAME) {
if (child.name() === RG_LAYER_OBJECT_GROUP_NAME) {
// We need to cache the group to ensure it composites out eraser strokes correctly
child.opacity(1);
child.cache();

@ -0,0 +1,23 @@
import type { S } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { describe, test } from 'vitest';

import type {
CLIPVisionModel,
ControlMode,
DepthAnythingModelSize,
IPMethod,
ProcessorConfig,
ProcessorType,
} from './controlAdapters';

describe('Control Adapter Types', () => {
test('ProcessorType', () => assert<Equals<ProcessorConfig['type'], ProcessorType>>());
test('IP Adapter Method', () => assert<Equals<NonNullable<S['IPAdapterInvocation']['method']>, IPMethod>>());
test('CLIP Vision Model', () =>
assert<Equals<NonNullable<S['IPAdapterInvocation']['clip_vision_model']>, CLIPVisionModel>>());
test('Control Mode', () => assert<Equals<NonNullable<S['ControlNetInvocation']['control_mode']>, ControlMode>>());
test('DepthAnything Model Size', () =>
assert<Equals<NonNullable<S['DepthAnythingImageProcessorInvocation']['model_size']>, DepthAnythingModelSize>>());
});
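An illustrative sketch (not part of this commit) of the tsafe pattern these tests rely on: assert<Equals<A, B>>() does nothing interesting at runtime; it simply fails to compile if the two types drift apart, which is what ties the app-side unions to the generated API schema. The local type aliases below are hypothetical:

// Illustrative only.
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

type LocalControlMode = 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced';
type ServerControlMode = 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced';

// Compiles today; becomes a type error if either union gains or loses a member.
assert<Equals<LocalControlMode, ServerControlMode>>();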

@ -0,0 +1,483 @@
import { deepClone } from 'common/util/deepClone';
import type {
ParameterControlNetModel,
ParameterIPAdapterModel,
ParameterT2IAdapterModel,
} from 'features/parameters/types/parameterSchemas';
import { merge, omit } from 'lodash-es';
import type {
BaseModelType,
CannyImageProcessorInvocation,
ColorMapImageProcessorInvocation,
ContentShuffleImageProcessorInvocation,
ControlNetModelConfig,
DepthAnythingImageProcessorInvocation,
DWOpenposeImageProcessorInvocation,
Graph,
HedImageProcessorInvocation,
ImageDTO,
LineartAnimeImageProcessorInvocation,
LineartImageProcessorInvocation,
MediapipeFaceProcessorInvocation,
MidasDepthImageProcessorInvocation,
MlsdImageProcessorInvocation,
NormalbaeImageProcessorInvocation,
PidiImageProcessorInvocation,
T2IAdapterModelConfig,
ZoeDepthImageProcessorInvocation,
} from 'services/api/types';
import { z } from 'zod';

const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']);
export type DepthAnythingModelSize = z.infer<typeof zDepthAnythingModelSize>;
export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSize =>
zDepthAnythingModelSize.safeParse(v).success;
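An illustrative sketch (not part of this commit) of the zod idiom this file uses throughout: one z.enum() is the single source of truth, z.infer derives the compile-time union, and safeParse() gives a runtime type guard. The enum name and values below are hypothetical:

// Illustrative only.
import { z } from 'zod';

const zSketchSize = z.enum(['large', 'base', 'small']);
type SketchSize = z.infer<typeof zSketchSize>; // 'large' | 'base' | 'small'
const isSketchSize = (v: unknown): v is SketchSize => zSketchSize.safeParse(v).success;

isSketchSize('base'); // true
isSketchSize('tiny'); // false - rejected at runtime, and 'tiny' is not assignable to SketchSize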
|
||||
|
||||
export type CannyProcessorConfig = Required<
|
||||
Pick<CannyImageProcessorInvocation, 'id' | 'type' | 'low_threshold' | 'high_threshold'>
|
||||
>;
|
||||
export type ColorMapProcessorConfig = Required<
|
||||
Pick<ColorMapImageProcessorInvocation, 'id' | 'type' | 'color_map_tile_size'>
|
||||
>;
|
||||
export type ContentShuffleProcessorConfig = Required<
|
||||
Pick<ContentShuffleImageProcessorInvocation, 'id' | 'type' | 'w' | 'h' | 'f'>
|
||||
>;
|
||||
export type DepthAnythingProcessorConfig = Required<
|
||||
Pick<DepthAnythingImageProcessorInvocation, 'id' | 'type' | 'model_size'>
|
||||
>;
|
||||
export type HedProcessorConfig = Required<Pick<HedImageProcessorInvocation, 'id' | 'type' | 'scribble'>>;
|
||||
type LineartAnimeProcessorConfig = Required<Pick<LineartAnimeImageProcessorInvocation, 'id' | 'type'>>;
|
||||
export type LineartProcessorConfig = Required<Pick<LineartImageProcessorInvocation, 'id' | 'type' | 'coarse'>>;
|
||||
export type MediapipeFaceProcessorConfig = Required<
|
||||
Pick<MediapipeFaceProcessorInvocation, 'id' | 'type' | 'max_faces' | 'min_confidence'>
|
||||
>;
|
||||
export type MidasDepthProcessorConfig = Required<
|
||||
Pick<MidasDepthImageProcessorInvocation, 'id' | 'type' | 'a_mult' | 'bg_th'>
|
||||
>;
|
||||
export type MlsdProcessorConfig = Required<Pick<MlsdImageProcessorInvocation, 'id' | 'type' | 'thr_v' | 'thr_d'>>;
|
||||
type NormalbaeProcessorConfig = Required<Pick<NormalbaeImageProcessorInvocation, 'id' | 'type'>>;
|
||||
export type DWOpenposeProcessorConfig = Required<
|
||||
Pick<DWOpenposeImageProcessorInvocation, 'id' | 'type' | 'draw_body' | 'draw_face' | 'draw_hands'>
|
||||
>;
|
||||
export type PidiProcessorConfig = Required<Pick<PidiImageProcessorInvocation, 'id' | 'type' | 'safe' | 'scribble'>>;
|
||||
type ZoeDepthProcessorConfig = Required<Pick<ZoeDepthImageProcessorInvocation, 'id' | 'type'>>;
|
||||
|
||||
export type ProcessorConfig =
|
||||
| CannyProcessorConfig
|
||||
| ColorMapProcessorConfig
|
||||
| ContentShuffleProcessorConfig
|
||||
| DepthAnythingProcessorConfig
|
||||
| HedProcessorConfig
|
||||
| LineartAnimeProcessorConfig
|
||||
| LineartProcessorConfig
|
||||
| MediapipeFaceProcessorConfig
|
||||
| MidasDepthProcessorConfig
|
||||
| MlsdProcessorConfig
|
||||
| NormalbaeProcessorConfig
|
||||
| DWOpenposeProcessorConfig
|
||||
| PidiProcessorConfig
|
||||
| ZoeDepthProcessorConfig;
|
||||
|
||||
export type ImageWithDims = {
|
||||
imageName: string;
|
||||
width: number;
|
||||
height: number;
|
||||
};
|
||||
|
||||
type ControlAdapterBase = {
|
||||
id: string;
|
||||
weight: number;
|
||||
image: ImageWithDims | null;
|
||||
processedImage: ImageWithDims | null;
|
||||
isProcessingImage: boolean;
|
||||
processorConfig: ProcessorConfig | null;
|
||||
beginEndStepPct: [number, number];
|
||||
};
|
||||
|
||||
const zControlMode = z.enum(['balanced', 'more_prompt', 'more_control', 'unbalanced']);
|
||||
export type ControlMode = z.infer<typeof zControlMode>;
|
||||
export const isControlMode = (v: unknown): v is ControlMode => zControlMode.safeParse(v).success;
|
||||
|
||||
export type ControlNetConfig = ControlAdapterBase & {
|
||||
type: 'controlnet';
|
||||
model: ParameterControlNetModel | null;
|
||||
controlMode: ControlMode;
|
||||
};
|
||||
export const isControlNetConfig = (ca: ControlNetConfig | T2IAdapterConfig): ca is ControlNetConfig =>
|
||||
ca.type === 'controlnet';
|
||||
|
||||
export type T2IAdapterConfig = ControlAdapterBase & {
|
||||
type: 't2i_adapter';
|
||||
model: ParameterT2IAdapterModel | null;
|
||||
};
|
||||
export const isT2IAdapterConfig = (ca: ControlNetConfig | T2IAdapterConfig): ca is T2IAdapterConfig =>
|
||||
ca.type === 't2i_adapter';
|
||||
|
||||
const zCLIPVisionModel = z.enum(['ViT-H', 'ViT-G']);
|
||||
export type CLIPVisionModel = z.infer<typeof zCLIPVisionModel>;
|
||||
export const isCLIPVisionModel = (v: unknown): v is CLIPVisionModel => zCLIPVisionModel.safeParse(v).success;
|
||||
|
||||
const zIPMethod = z.enum(['full', 'style', 'composition']);
|
||||
export type IPMethod = z.infer<typeof zIPMethod>;
|
||||
export const isIPMethod = (v: unknown): v is IPMethod => zIPMethod.safeParse(v).success;
|
||||
|
||||
export type IPAdapterConfig = {
|
||||
id: string;
|
||||
type: 'ip_adapter';
|
||||
weight: number;
|
||||
method: IPMethod;
|
||||
image: ImageWithDims | null;
|
||||
model: ParameterIPAdapterModel | null;
|
||||
clipVisionModel: CLIPVisionModel;
|
||||
beginEndStepPct: [number, number];
|
||||
};
|
||||
|
||||
const zProcessorType = z.enum([
|
||||
'canny_image_processor',
|
||||
'color_map_image_processor',
|
||||
'content_shuffle_image_processor',
|
||||
'depth_anything_image_processor',
|
||||
'hed_image_processor',
|
||||
'lineart_anime_image_processor',
|
||||
'lineart_image_processor',
|
||||
'mediapipe_face_processor',
|
||||
'midas_depth_image_processor',
|
||||
'mlsd_image_processor',
|
||||
'normalbae_image_processor',
|
||||
'dw_openpose_image_processor',
|
||||
'pidi_image_processor',
|
||||
'zoe_depth_image_processor',
|
||||
]);
|
||||
export type ProcessorType = z.infer<typeof zProcessorType>;
|
||||
export const isProcessorType = (v: unknown): v is ProcessorType => zProcessorType.safeParse(v).success;
|
||||
|
||||
type ProcessorData<T extends ProcessorType> = {
type: T;
labelTKey: string;
descriptionTKey: string;
buildDefaults(baseModel?: BaseModelType): Extract<ProcessorConfig, { type: T }>;
buildNode(
image: ImageWithDims,
config: Extract<ProcessorConfig, { type: T }>
): Extract<Graph['nodes'][string], { type: T }>;
};

const minDim = (image: ImageWithDims): number => Math.min(image.width, image.height);

type CAProcessorsData = {
[key in ProcessorType]: ProcessorData<key>;
};
/**
* A dict of ControlNet processors, including:
* - label translation key
* - description translation key
* - a builder to create default values for the config
* - a builder to create the node for the config
*
* TODO: Generate from the OpenAPI schema
*/
export const CONTROLNET_PROCESSORS: CAProcessorsData = {
canny_image_processor: {
type: 'canny_image_processor',
labelTKey: 'controlnet.canny',
descriptionTKey: 'controlnet.cannyDescription',
buildDefaults: () => ({
id: 'canny_image_processor',
type: 'canny_image_processor',
low_threshold: 100,
high_threshold: 200,
}),
buildNode: (image, config) => ({
...config,
type: 'canny_image_processor',
image: { image_name: image.imageName },
detect_resolution: minDim(image),
image_resolution: minDim(image),
}),
},
|
||||
color_map_image_processor: {
|
||||
type: 'color_map_image_processor',
|
||||
labelTKey: 'controlnet.colorMap',
|
||||
descriptionTKey: 'controlnet.colorMapDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'color_map_image_processor',
|
||||
type: 'color_map_image_processor',
|
||||
color_map_tile_size: 64,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
type: 'color_map_image_processor',
|
||||
image: { image_name: image.imageName },
|
||||
}),
|
||||
},
|
||||
content_shuffle_image_processor: {
|
||||
type: 'content_shuffle_image_processor',
|
||||
labelTKey: 'controlnet.contentShuffle',
|
||||
descriptionTKey: 'controlnet.contentShuffleDescription',
|
||||
buildDefaults: (baseModel) => ({
|
||||
id: 'content_shuffle_image_processor',
|
||||
type: 'content_shuffle_image_processor',
|
||||
h: baseModel === 'sdxl' ? 1024 : 512,
|
||||
w: baseModel === 'sdxl' ? 1024 : 512,
|
||||
f: baseModel === 'sdxl' ? 512 : 256,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
depth_anything_image_processor: {
|
||||
type: 'depth_anything_image_processor',
|
||||
labelTKey: 'controlnet.depthAnything',
|
||||
descriptionTKey: 'controlnet.depthAnythingDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'depth_anything_image_processor',
|
||||
type: 'depth_anything_image_processor',
|
||||
model_size: 'small',
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
hed_image_processor: {
|
||||
type: 'hed_image_processor',
|
||||
labelTKey: 'controlnet.hed',
|
||||
descriptionTKey: 'controlnet.hedDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'hed_image_processor',
|
||||
type: 'hed_image_processor',
|
||||
scribble: false,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
lineart_anime_image_processor: {
|
||||
type: 'lineart_anime_image_processor',
|
||||
labelTKey: 'controlnet.lineartAnime',
|
||||
descriptionTKey: 'controlnet.lineartAnimeDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'lineart_anime_image_processor',
|
||||
type: 'lineart_anime_image_processor',
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
lineart_image_processor: {
|
||||
type: 'lineart_image_processor',
|
||||
labelTKey: 'controlnet.lineart',
|
||||
descriptionTKey: 'controlnet.lineartDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'lineart_image_processor',
|
||||
type: 'lineart_image_processor',
|
||||
coarse: false,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
mediapipe_face_processor: {
|
||||
type: 'mediapipe_face_processor',
|
||||
labelTKey: 'controlnet.mediapipeFace',
|
||||
descriptionTKey: 'controlnet.mediapipeFaceDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'mediapipe_face_processor',
|
||||
type: 'mediapipe_face_processor',
|
||||
max_faces: 1,
|
||||
min_confidence: 0.5,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
midas_depth_image_processor: {
|
||||
type: 'midas_depth_image_processor',
|
||||
labelTKey: 'controlnet.depthMidas',
|
||||
descriptionTKey: 'controlnet.depthMidasDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'midas_depth_image_processor',
|
||||
type: 'midas_depth_image_processor',
|
||||
a_mult: 2,
|
||||
bg_th: 0.1,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
mlsd_image_processor: {
|
||||
type: 'mlsd_image_processor',
|
||||
labelTKey: 'controlnet.mlsd',
|
||||
descriptionTKey: 'controlnet.mlsdDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'mlsd_image_processor',
|
||||
type: 'mlsd_image_processor',
|
||||
thr_d: 0.1,
|
||||
thr_v: 0.1,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
normalbae_image_processor: {
|
||||
type: 'normalbae_image_processor',
|
||||
labelTKey: 'controlnet.normalBae',
|
||||
descriptionTKey: 'controlnet.normalBaeDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'normalbae_image_processor',
|
||||
type: 'normalbae_image_processor',
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
dw_openpose_image_processor: {
|
||||
type: 'dw_openpose_image_processor',
|
||||
labelTKey: 'controlnet.dwOpenpose',
|
||||
descriptionTKey: 'controlnet.dwOpenposeDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'dw_openpose_image_processor',
|
||||
type: 'dw_openpose_image_processor',
|
||||
draw_body: true,
|
||||
draw_face: false,
|
||||
draw_hands: false,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
pidi_image_processor: {
|
||||
type: 'pidi_image_processor',
|
||||
labelTKey: 'controlnet.pidi',
|
||||
descriptionTKey: 'controlnet.pidiDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'pidi_image_processor',
|
||||
type: 'pidi_image_processor',
|
||||
scribble: false,
|
||||
safe: false,
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
detect_resolution: minDim(image),
|
||||
image_resolution: minDim(image),
|
||||
}),
|
||||
},
|
||||
zoe_depth_image_processor: {
|
||||
type: 'zoe_depth_image_processor',
|
||||
labelTKey: 'controlnet.depthZoe',
|
||||
descriptionTKey: 'controlnet.depthZoeDescription',
|
||||
buildDefaults: () => ({
|
||||
id: 'zoe_depth_image_processor',
|
||||
type: 'zoe_depth_image_processor',
|
||||
}),
|
||||
buildNode: (image, config) => ({
|
||||
...config,
|
||||
image: { image_name: image.imageName },
|
||||
}),
|
||||
},
|
||||
};
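An illustrative sketch (not part of this commit) of how CONTROLNET_PROCESSORS is consumed, assuming the surrounding definitions in this file; the image values are hypothetical:

// Illustrative only.
const sketchImage: ImageWithDims = { imageName: 'example.png', width: 768, height: 512 };

// buildDefaults gives a fresh, fully-populated config for one processor type:
const cannyConfig = CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults();
// -> { id: 'canny_image_processor', type: 'canny_image_processor', low_threshold: 100, high_threshold: 200 }

// buildNode turns that config plus an input image into the graph node,
// clamping the working resolution to the image's smaller side via minDim():
const cannyNode = CONTROLNET_PROCESSORS.canny_image_processor.buildNode(sketchImage, cannyConfig);
// -> detect_resolution === image_resolution === 512 for a 768x512 image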

const initialControlNet: Omit<ControlNetConfig, 'id'> = {
type: 'controlnet',
model: null,
weight: 1,
beginEndStepPct: [0, 1],
controlMode: 'balanced',
image: null,
processedImage: null,
isProcessingImage: false,
processorConfig: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults(),
};

const initialT2IAdapter: Omit<T2IAdapterConfig, 'id'> = {
type: 't2i_adapter',
model: null,
weight: 1,
beginEndStepPct: [0, 1],
image: null,
processedImage: null,
isProcessingImage: false,
processorConfig: CONTROLNET_PROCESSORS.canny_image_processor.buildDefaults(),
};

const initialIPAdapter: Omit<IPAdapterConfig, 'id'> = {
type: 'ip_adapter',
image: null,
model: null,
beginEndStepPct: [0, 1],
method: 'full',
clipVisionModel: 'ViT-H',
weight: 1,
};

export const buildControlNet = (id: string, overrides?: Partial<ControlNetConfig>): ControlNetConfig => {
return merge(deepClone(initialControlNet), { id, ...overrides });
};

export const buildT2IAdapter = (id: string, overrides?: Partial<T2IAdapterConfig>): T2IAdapterConfig => {
return merge(deepClone(initialT2IAdapter), { id, ...overrides });
};

export const buildIPAdapter = (id: string, overrides?: Partial<IPAdapterConfig>): IPAdapterConfig => {
return merge(deepClone(initialIPAdapter), { id, ...overrides });
};

export const buildControlAdapterProcessor = (
modelConfig: ControlNetModelConfig | T2IAdapterModelConfig
): ProcessorConfig | null => {
const defaultPreprocessor = modelConfig.default_settings?.preprocessor;
if (!isProcessorType(defaultPreprocessor)) {
return null;
}
const processorConfig = CONTROLNET_PROCESSORS[defaultPreprocessor].buildDefaults(modelConfig.base);
return processorConfig;
};

export const imageDTOToImageWithDims = ({ image_name, width, height }: ImageDTO): ImageWithDims => ({
imageName: image_name,
width,
height,
});

export const t2iAdapterToControlNet = (t2iAdapter: T2IAdapterConfig): ControlNetConfig => {
return {
...deepClone(t2iAdapter),
type: 'controlnet',
controlMode: initialControlNet.controlMode,
};
};

export const controlNetToT2IAdapter = (controlNet: ControlNetConfig): T2IAdapterConfig => {
return {
...omit(deepClone(controlNet), 'controlMode'),
type: 't2i_adapter',
};
};
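An illustrative sketch (not part of this commit) of the builder and conversion helpers above, assuming the surrounding definitions; the ids and override values are hypothetical:

// Illustrative only.
const cn = buildControlNet('cn-1', { weight: 0.75 });
// deepClone + merge: starts from initialControlNet, then applies { id, weight }.
// cn.controlMode === 'balanced', cn.weight === 0.75

const t2i = controlNetToT2IAdapter(cn);
// controlMode is omitted because T2IAdapterConfig has no such field.

const backToCn = t2iAdapterToControlNet(t2i);
// controlMode is restored from initialControlNet ('balanced'); the other fields survive the round trip.
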
@ -1,7 +1,7 @@
|
||||
import { getStore } from 'app/store/nanostores/store';
|
||||
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
|
||||
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
|
||||
import { isRegionalGuidanceLayer, regional_guidance_layer_NAME } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { isRegionalGuidanceLayer, RG_LAYER_NAME } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { renderers } from 'features/controlLayers/util/renderers';
|
||||
import Konva from 'konva';
|
||||
import { assert } from 'tsafe';
|
||||
@ -24,7 +24,7 @@ export const getRegionalPromptLayerBlobs = async (
|
||||
const stage = new Konva.Stage({ container, width, height });
|
||||
renderers.renderLayers(stage, reduxLayers, 1, 'brush');
|
||||
|
||||
const konvaLayers = stage.find<Konva.Layer>(`.${regional_guidance_layer_NAME}`);
|
||||
const konvaLayers = stage.find<Konva.Layer>(`.${RG_LAYER_NAME}`);
|
||||
const blobs: Record<string, Blob> = {};
|
||||
|
||||
// First remove all layers
|
||||
|
@ -5,20 +5,20 @@ import {
|
||||
$tool,
|
||||
BACKGROUND_LAYER_ID,
|
||||
BACKGROUND_RECT_ID,
|
||||
CONTROLNET_LAYER_IMAGE_NAME,
|
||||
CONTROLNET_LAYER_NAME,
|
||||
getControlNetLayerImageId,
|
||||
CA_LAYER_IMAGE_NAME,
|
||||
CA_LAYER_NAME,
|
||||
getCALayerImageId,
|
||||
getLayerBboxId,
|
||||
getRegionalGuidanceLayerObjectGroupId,
|
||||
getRGLayerObjectGroupId,
|
||||
isControlAdapterLayer,
|
||||
isRegionalGuidanceLayer,
|
||||
isRenderableLayer,
|
||||
LAYER_BBOX_NAME,
|
||||
NO_LAYERS_MESSAGE_LAYER_ID,
|
||||
regional_guidance_layer_LINE_NAME,
|
||||
regional_guidance_layer_NAME,
|
||||
regional_guidance_layer_OBJECT_GROUP_NAME,
|
||||
regional_guidance_layer_RECT_NAME,
|
||||
RG_LAYER_LINE_NAME,
|
||||
RG_LAYER_NAME,
|
||||
RG_LAYER_OBJECT_GROUP_NAME,
|
||||
RG_LAYER_RECT_NAME,
|
||||
TOOL_PREVIEW_BRUSH_BORDER_INNER_ID,
|
||||
TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID,
|
||||
TOOL_PREVIEW_BRUSH_FILL_ID,
|
||||
@ -52,11 +52,10 @@ const STAGE_BG_DATAURL =
|
||||
|
||||
const mapId = (object: { id: string }) => object.id;
|
||||
|
||||
const selectRenderableLayers = (n: Konva.Node) =>
|
||||
n.name() === regional_guidance_layer_NAME || n.name() === CONTROLNET_LAYER_NAME;
|
||||
const selectRenderableLayers = (n: Konva.Node) => n.name() === RG_LAYER_NAME || n.name() === CA_LAYER_NAME;
|
||||
|
||||
const selectVectorMaskObjects = (node: Konva.Node) => {
|
||||
return node.name() === regional_guidance_layer_LINE_NAME || node.name() === regional_guidance_layer_RECT_NAME;
|
||||
return node.name() === RG_LAYER_LINE_NAME || node.name() === RG_LAYER_RECT_NAME;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -141,7 +140,7 @@ const renderToolPreview = (
|
||||
isMouseOver: boolean,
|
||||
brushSize: number
|
||||
) => {
|
||||
const layerCount = stage.find(`.${regional_guidance_layer_NAME}`).length;
|
||||
const layerCount = stage.find(`.${RG_LAYER_NAME}`).length;
|
||||
// Update the stage's pointer style
|
||||
if (layerCount === 0) {
|
||||
// We have no layers, so we should not render any tool
|
||||
@ -233,7 +232,7 @@ const createRegionalGuidanceLayer = (
|
||||
// This layer hasn't been added to the konva state yet
|
||||
const konvaLayer = new Konva.Layer({
|
||||
id: reduxLayer.id,
|
||||
name: regional_guidance_layer_NAME,
|
||||
name: RG_LAYER_NAME,
|
||||
draggable: true,
|
||||
dragDistance: 0,
|
||||
});
|
||||
@ -265,8 +264,8 @@ const createRegionalGuidanceLayer = (
|
||||
|
||||
// The object group holds all of the layer's objects (e.g. lines and rects)
|
||||
const konvaObjectGroup = new Konva.Group({
|
||||
id: getRegionalGuidanceLayerObjectGroupId(reduxLayer.id, uuidv4()),
|
||||
name: regional_guidance_layer_OBJECT_GROUP_NAME,
|
||||
id: getRGLayerObjectGroupId(reduxLayer.id, uuidv4()),
|
||||
name: RG_LAYER_OBJECT_GROUP_NAME,
|
||||
listening: false,
|
||||
});
|
||||
konvaLayer.add(konvaObjectGroup);
|
||||
@ -285,7 +284,7 @@ const createVectorMaskLine = (reduxObject: VectorMaskLine, konvaGroup: Konva.Gro
|
||||
const vectorMaskLine = new Konva.Line({
|
||||
id: reduxObject.id,
|
||||
key: reduxObject.id,
|
||||
name: regional_guidance_layer_LINE_NAME,
|
||||
name: RG_LAYER_LINE_NAME,
|
||||
strokeWidth: reduxObject.strokeWidth,
|
||||
tension: 0,
|
||||
lineCap: 'round',
|
||||
@ -307,7 +306,7 @@ const createVectorMaskRect = (reduxObject: VectorMaskRect, konvaGroup: Konva.Gro
|
||||
const vectorMaskRect = new Konva.Rect({
|
||||
id: reduxObject.id,
|
||||
key: reduxObject.id,
|
||||
name: regional_guidance_layer_RECT_NAME,
|
||||
name: RG_LAYER_RECT_NAME,
|
||||
x: reduxObject.x,
|
||||
y: reduxObject.y,
|
||||
width: reduxObject.width,
|
||||
@ -347,7 +346,7 @@ const renderRegionalGuidanceLayer = (
|
||||
// Convert the color to a string, stripping the alpha - the object group will handle opacity.
|
||||
const rgbColor = rgbColorToString(reduxLayer.previewColor);
|
||||
|
||||
const konvaObjectGroup = konvaLayer.findOne<Konva.Group>(`.${regional_guidance_layer_OBJECT_GROUP_NAME}`);
|
||||
const konvaObjectGroup = konvaLayer.findOne<Konva.Group>(`.${RG_LAYER_OBJECT_GROUP_NAME}`);
|
||||
assert(konvaObjectGroup, `Object group not found for layer ${reduxLayer.id}`);
|
||||
|
||||
// We use caching to handle "global" layer opacity, but caching is expensive and we should only do it when required.
|
||||
@ -411,7 +410,7 @@ const renderRegionalGuidanceLayer = (
|
||||
const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer): Konva.Layer => {
|
||||
const konvaLayer = new Konva.Layer({
|
||||
id: reduxLayer.id,
|
||||
name: CONTROLNET_LAYER_NAME,
|
||||
name: CA_LAYER_NAME,
|
||||
imageSmoothingEnabled: true,
|
||||
});
|
||||
stage.add(konvaLayer);
|
||||
@ -420,7 +419,7 @@ const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLay
|
||||
|
||||
const createControlNetLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => {
|
||||
const konvaImage = new Konva.Image({
|
||||
name: CONTROLNET_LAYER_IMAGE_NAME,
|
||||
name: CA_LAYER_IMAGE_NAME,
|
||||
image,
|
||||
});
|
||||
konvaLayer.add(konvaImage);
|
||||
@ -432,32 +431,32 @@ const updateControlNetLayerImageSource = async (
|
||||
konvaLayer: Konva.Layer,
|
||||
reduxLayer: ControlAdapterLayer
|
||||
) => {
|
||||
if (reduxLayer.imageName) {
|
||||
const imageName = reduxLayer.imageName;
|
||||
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(reduxLayer.imageName));
|
||||
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image;
|
||||
if (image) {
|
||||
const { imageName } = image;
|
||||
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
|
||||
const imageDTO = await req.unwrap();
|
||||
req.unsubscribe();
|
||||
const image = new Image();
|
||||
const imageId = getControlNetLayerImageId(reduxLayer.id, imageName);
|
||||
image.onload = () => {
|
||||
const imageEl = new Image();
|
||||
const imageId = getCALayerImageId(reduxLayer.id, imageName);
|
||||
imageEl.onload = () => {
|
||||
// Find the existing image or create a new one - must find using the name, bc the id may have just changed
|
||||
const konvaImage =
|
||||
konvaLayer.findOne<Konva.Image>(`.${CONTROLNET_LAYER_IMAGE_NAME}`) ??
|
||||
createControlNetLayerImage(konvaLayer, image);
|
||||
konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`) ?? createControlNetLayerImage(konvaLayer, imageEl);
|
||||
|
||||
// Update the image's attributes
|
||||
konvaImage.setAttrs({
|
||||
id: imageId,
|
||||
image,
|
||||
image: imageEl,
|
||||
});
|
||||
updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer);
|
||||
// Must cache after this to apply the filters
|
||||
konvaImage.cache();
|
||||
image.id = imageId;
|
||||
imageEl.id = imageId;
|
||||
};
|
||||
image.src = imageDTO.image_url;
|
||||
imageEl.src = imageDTO.image_url;
|
||||
} else {
|
||||
konvaLayer.findOne(`.${CONTROLNET_LAYER_IMAGE_NAME}`)?.destroy();
|
||||
konvaLayer.findOne(`.${CA_LAYER_IMAGE_NAME}`)?.destroy();
|
||||
}
|
||||
};
|
||||
|
||||
@ -497,16 +496,14 @@ const updateControlNetLayerImageAttrs = (
|
||||
|
||||
const renderControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer) => {
|
||||
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? createControlNetLayer(stage, reduxLayer);
|
||||
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${CONTROLNET_LAYER_IMAGE_NAME}`);
|
||||
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`);
|
||||
const canvasImageSource = konvaImage?.image();
|
||||
let imageSourceNeedsUpdate = false;
|
||||
if (canvasImageSource instanceof HTMLImageElement) {
|
||||
if (
|
||||
reduxLayer.imageName &&
|
||||
canvasImageSource.id !== getControlNetLayerImageId(reduxLayer.id, reduxLayer.imageName)
|
||||
) {
|
||||
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image;
|
||||
if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.imageName)) {
|
||||
imageSourceNeedsUpdate = true;
|
||||
} else if (!reduxLayer.imageName) {
|
||||
} else if (!image) {
|
||||
imageSourceNeedsUpdate = true;
|
||||
}
|
||||
} else if (!canvasImageSource) {
|
||||
|
@ -33,6 +33,28 @@ type ControlAdapterDropData = BaseDropData & {
|
||||
};
|
||||
};
|
||||
|
||||
export type CALayerImageDropData = BaseDropData & {
|
||||
actionType: 'SET_CA_LAYER_IMAGE';
|
||||
context: {
|
||||
layerId: string;
|
||||
};
|
||||
};
|
||||
|
||||
export type IPALayerImageDropData = BaseDropData & {
|
||||
actionType: 'SET_IPA_LAYER_IMAGE';
|
||||
context: {
|
||||
layerId: string;
|
||||
};
|
||||
};
|
||||
|
||||
export type RGLayerIPAdapterImageDropData = BaseDropData & {
|
||||
actionType: 'SET_RG_LAYER_IP_ADAPTER_IMAGE';
|
||||
context: {
|
||||
layerId: string;
|
||||
ipAdapterId: string;
|
||||
};
|
||||
};
|
||||
|
||||
export type CanvasInitialImageDropData = BaseDropData & {
|
||||
actionType: 'SET_CANVAS_INITIAL_IMAGE';
|
||||
};
|
||||
@ -61,7 +83,10 @@ export type TypesafeDroppableData =
|
||||
| CanvasInitialImageDropData
|
||||
| NodesImageDropData
|
||||
| AddToBoardDropData
|
||||
| RemoveFromBoardDropData;
|
||||
| RemoveFromBoardDropData
|
||||
| CALayerImageDropData
|
||||
| IPALayerImageDropData
|
||||
| RGLayerIPAdapterImageDropData;
|
||||
|
||||
type BaseDragData = {
|
||||
id: string;
|
||||
|
@ -19,6 +19,12 @@ export const isValidDrop = (overData: TypesafeDroppableData | undefined, active:
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_CONTROL_ADAPTER_IMAGE':
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_CA_LAYER_IMAGE':
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_IPA_LAYER_IMAGE':
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_RG_LAYER_IP_ADAPTER_IMAGE':
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_CANVAS_INITIAL_IMAGE':
|
||||
return payloadType === 'IMAGE_DTO';
|
||||
case 'SET_NODES_IMAGE':
|
||||
|
@ -49,6 +49,7 @@ export const zSchedulerField = z.enum([
|
||||
'euler_a',
|
||||
'kdpm_2_a',
|
||||
'lcm',
|
||||
'tcd',
|
||||
]);
|
||||
export type SchedulerField = z.infer<typeof zSchedulerField>;
|
||||
// #endregion
|
||||
|
@ -1,9 +1,23 @@
|
||||
import { getStore } from 'app/store/nanostores/store';
|
||||
import type { RootState } from 'app/store/store';
|
||||
import { selectAllIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { isRegionalGuidanceLayer } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { getRegionalPromptLayerBlobs } from 'features/controlLayers/util/getLayerBlobs';
|
||||
import {
|
||||
isControlAdapterLayer,
|
||||
isIPAdapterLayer,
|
||||
isRegionalGuidanceLayer,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import {
|
||||
type ControlNetConfig,
|
||||
type ImageWithDims,
|
||||
type IPAdapterConfig,
|
||||
isControlNetConfig,
|
||||
isT2IAdapterConfig,
|
||||
type ProcessorConfig,
|
||||
type T2IAdapterConfig,
|
||||
} from 'features/controlLayers/util/controlAdapters';
|
||||
import { getRegionalPromptLayerBlobs } from 'features/controlLayers/util/getLayerBlobs';
|
||||
import type { ImageField } from 'features/nodes/types/common';
|
||||
import {
|
||||
CONTROL_NET_COLLECT,
|
||||
IP_ADAPTER_COLLECT,
|
||||
NEGATIVE_CONDITIONING,
|
||||
NEGATIVE_CONDITIONING_COLLECT,
|
||||
@ -14,45 +28,383 @@ import {
|
||||
PROMPT_REGION_NEGATIVE_COND_PREFIX,
|
||||
PROMPT_REGION_POSITIVE_COND_INVERTED_PREFIX,
|
||||
PROMPT_REGION_POSITIVE_COND_PREFIX,
|
||||
T2I_ADAPTER_COLLECT,
|
||||
} from 'features/nodes/util/graph/constants';
|
||||
import { upsertMetadata } from 'features/nodes/util/graph/metadata';
|
||||
import { size } from 'lodash-es';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import type { CollectInvocation, Edge, IPAdapterInvocation, NonNullableGraph, S } from 'services/api/types';
|
||||
import type {
|
||||
CollectInvocation,
|
||||
ControlNetInvocation,
|
||||
CoreMetadataInvocation,
|
||||
Edge,
|
||||
IPAdapterInvocation,
|
||||
NonNullableGraph,
|
||||
S,
|
||||
T2IAdapterInvocation,
|
||||
} from 'services/api/types';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
export const addControlLayersToGraph = async (state: RootState, graph: NonNullableGraph, denoiseNodeId: string) => {
if (!state.controlLayers.present.isEnabled) {
const buildControlImage = (
image: ImageWithDims | null,
processedImage: ImageWithDims | null,
processorConfig: ProcessorConfig | null
): ImageField => {
if (processedImage && processorConfig) {
// We've processed the image in the app - use it for the control image.
return {
image_name: processedImage.imageName,
};
} else if (image) {
// No processor selected, and we have an image - the user provided a processed image, use it for the control image.
return {
image_name: image.imageName,
};
}
assert(false, 'Attempted to add unprocessed control image');
};
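An illustrative sketch (not part of this commit) of the three cases buildControlImage distinguishes, assuming the surrounding definitions; the image names are hypothetical:

// Illustrative only.
const raw: ImageWithDims = { imageName: 'raw.png', width: 512, height: 512 };
const processed: ImageWithDims = { imageName: 'processed.png', width: 512, height: 512 };

buildControlImage(raw, processed, { id: 'canny_image_processor', type: 'canny_image_processor', low_threshold: 100, high_threshold: 200 });
// -> { image_name: 'processed.png' }   (app-side preprocessing wins)

buildControlImage(raw, null, null);
// -> { image_name: 'raw.png' }         (no processor selected: trust the user's image)

// buildControlImage(null, null, null) would hit the assert and throw -
// callers are expected to filter out adapters without a usable control image first.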
|
||||
|
||||
const buildControlNetMetadata = (controlNet: ControlNetConfig): S['ControlNetMetadataField'] => {
|
||||
const { beginEndStepPct, controlMode, image, model, processedImage, processorConfig, weight } = controlNet;
|
||||
|
||||
assert(model, 'ControlNet model is required');
|
||||
assert(image, 'ControlNet image is required');
|
||||
|
||||
const processed_image =
|
||||
processedImage && processorConfig
|
||||
? {
|
||||
image_name: processedImage.imageName,
|
||||
}
|
||||
: null;
|
||||
|
||||
return {
|
||||
control_model: model,
|
||||
control_weight: weight,
|
||||
control_mode: controlMode,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
resize_mode: 'just_resize',
|
||||
image: {
|
||||
image_name: image.imageName,
|
||||
},
|
||||
processed_image,
|
||||
};
|
||||
};
|
||||
|
||||
const addControlNetCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => {
|
||||
if (graph.nodes[CONTROL_NET_COLLECT]) {
|
||||
// You see, we've already got one!
|
||||
return;
|
||||
}
|
||||
// Add the ControlNet collector
|
||||
const controlNetIterateNode: CollectInvocation = {
|
||||
id: CONTROL_NET_COLLECT,
|
||||
type: 'collect',
|
||||
is_intermediate: true,
|
||||
};
|
||||
graph.nodes[CONTROL_NET_COLLECT] = controlNetIterateNode;
|
||||
graph.edges.push({
|
||||
source: { node_id: CONTROL_NET_COLLECT, field: 'collection' },
|
||||
destination: {
|
||||
node_id: denoiseNodeId,
|
||||
field: 'control',
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const addGlobalControlNetsToGraph = async (
|
||||
controlNets: ControlNetConfig[],
|
||||
graph: NonNullableGraph,
|
||||
denoiseNodeId: string
|
||||
) => {
|
||||
if (controlNets.length === 0) {
|
||||
return;
|
||||
}
|
||||
const controlNetMetadata: CoreMetadataInvocation['controlnets'] = [];
|
||||
addControlNetCollectorSafe(graph, denoiseNodeId);
|
||||
|
||||
for (const controlNet of controlNets) {
|
||||
if (!controlNet.model) {
|
||||
return;
|
||||
}
|
||||
const { id, beginEndStepPct, controlMode, image, model, processedImage, processorConfig, weight } = controlNet;
|
||||
|
||||
const controlNetNode: ControlNetInvocation = {
|
||||
id: `control_net_${id}`,
|
||||
type: 'controlnet',
|
||||
is_intermediate: true,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
control_mode: controlMode,
|
||||
resize_mode: 'just_resize',
|
||||
control_model: model,
|
||||
control_weight: weight,
|
||||
image: buildControlImage(image, processedImage, processorConfig),
|
||||
};
|
||||
|
||||
graph.nodes[controlNetNode.id] = controlNetNode;
|
||||
|
||||
controlNetMetadata.push(buildControlNetMetadata(controlNet));
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: controlNetNode.id, field: 'control' },
|
||||
destination: {
|
||||
node_id: CONTROL_NET_COLLECT,
|
||||
field: 'item',
|
||||
},
|
||||
});
|
||||
}
|
||||
upsertMetadata(graph, { controlnets: controlNetMetadata });
|
||||
};
|
||||
|
||||
const buildT2IAdapterMetadata = (t2iAdapter: T2IAdapterConfig): S['T2IAdapterMetadataField'] => {
|
||||
const { beginEndStepPct, image, model, processedImage, processorConfig, weight } = t2iAdapter;
|
||||
|
||||
assert(model, 'T2I Adapter model is required');
|
||||
assert(image, 'T2I Adapter image is required');
|
||||
|
||||
const processed_image =
|
||||
processedImage && processorConfig
|
||||
? {
|
||||
image_name: processedImage.imageName,
|
||||
}
|
||||
: null;
|
||||
|
||||
return {
|
||||
t2i_adapter_model: model,
|
||||
weight,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
resize_mode: 'just_resize',
|
||||
image: {
|
||||
image_name: image.imageName,
|
||||
},
|
||||
processed_image,
|
||||
};
|
||||
};
|
||||
|
||||
const addT2IAdapterCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => {
|
||||
if (graph.nodes[T2I_ADAPTER_COLLECT]) {
|
||||
// You see, we've already got one!
|
||||
return;
|
||||
}
|
||||
// Even though denoise_latents' t2i adapter input is collection or scalar, keep it simple and always use a collect
|
||||
const t2iAdapterCollectNode: CollectInvocation = {
|
||||
id: T2I_ADAPTER_COLLECT,
|
||||
type: 'collect',
|
||||
is_intermediate: true,
|
||||
};
|
||||
graph.nodes[T2I_ADAPTER_COLLECT] = t2iAdapterCollectNode;
|
||||
graph.edges.push({
|
||||
source: { node_id: T2I_ADAPTER_COLLECT, field: 'collection' },
|
||||
destination: {
|
||||
node_id: denoiseNodeId,
|
||||
field: 't2i_adapter',
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const addGlobalT2IAdaptersToGraph = async (
|
||||
t2iAdapters: T2IAdapterConfig[],
|
||||
graph: NonNullableGraph,
|
||||
denoiseNodeId: string
|
||||
) => {
|
||||
if (t2iAdapters.length === 0) {
|
||||
return;
|
||||
}
|
||||
const t2iAdapterMetadata: CoreMetadataInvocation['t2iAdapters'] = [];
|
||||
addT2IAdapterCollectorSafe(graph, denoiseNodeId);
|
||||
|
||||
for (const t2iAdapter of t2iAdapters) {
|
||||
if (!t2iAdapter.model) {
|
||||
return;
|
||||
}
|
||||
const { id, beginEndStepPct, image, model, processedImage, processorConfig, weight } = t2iAdapter;
|
||||
|
||||
const t2iAdapterNode: T2IAdapterInvocation = {
|
||||
id: `t2i_adapter_${id}`,
|
||||
type: 't2i_adapter',
|
||||
is_intermediate: true,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
resize_mode: 'just_resize',
|
||||
t2i_adapter_model: model,
|
||||
weight: weight,
|
||||
image: buildControlImage(image, processedImage, processorConfig),
|
||||
};
|
||||
|
||||
graph.nodes[t2iAdapterNode.id] = t2iAdapterNode;
|
||||
|
||||
t2iAdapterMetadata.push(buildT2IAdapterMetadata(t2iAdapter));
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: t2iAdapterNode.id, field: 't2i_adapter' },
|
||||
destination: {
|
||||
node_id: T2I_ADAPTER_COLLECT,
|
||||
field: 'item',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
upsertMetadata(graph, { t2iAdapters: t2iAdapterMetadata });
|
||||
};
|
||||
|
||||
const buildIPAdapterMetadata = (ipAdapter: IPAdapterConfig): S['IPAdapterMetadataField'] => {
|
||||
const { weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter;
|
||||
|
||||
assert(model, 'IP Adapter model is required');
|
||||
assert(image, 'IP Adapter image is required');
|
||||
|
||||
return {
|
||||
ip_adapter_model: model,
|
||||
clip_vision_model: clipVisionModel,
|
||||
weight,
|
||||
method,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
image: {
|
||||
image_name: image.imageName,
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
const addIPAdapterCollectorSafe = (graph: NonNullableGraph, denoiseNodeId: string) => {
|
||||
if (graph.nodes[IP_ADAPTER_COLLECT]) {
|
||||
// You see, we've already got one!
|
||||
return;
|
||||
}
|
||||
|
||||
const ipAdapterCollectNode: CollectInvocation = {
|
||||
id: IP_ADAPTER_COLLECT,
|
||||
type: 'collect',
|
||||
is_intermediate: true,
|
||||
};
|
||||
graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode;
|
||||
graph.edges.push({
|
||||
source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' },
|
||||
destination: {
|
||||
node_id: denoiseNodeId,
|
||||
field: 'ip_adapter',
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const addGlobalIPAdaptersToGraph = async (
|
||||
ipAdapters: IPAdapterConfig[],
|
||||
graph: NonNullableGraph,
|
||||
denoiseNodeId: string
|
||||
) => {
|
||||
if (ipAdapters.length === 0) {
|
||||
return;
|
||||
}
|
||||
const ipAdapterMetdata: CoreMetadataInvocation['ipAdapters'] = [];
|
||||
addIPAdapterCollectorSafe(graph, denoiseNodeId);
|
||||
|
||||
for (const ipAdapter of ipAdapters) {
|
||||
const { id, weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter;
|
||||
assert(image, 'IP Adapter image is required');
|
||||
assert(model, 'IP Adapter model is required');
|
||||
|
||||
const ipAdapterNode: IPAdapterInvocation = {
|
||||
id: `ip_adapter_${id}`,
|
||||
type: 'ip_adapter',
|
||||
is_intermediate: true,
|
||||
weight,
|
||||
method,
|
||||
ip_adapter_model: model,
|
||||
clip_vision_model: clipVisionModel,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
image: {
|
||||
image_name: image.imageName,
|
||||
},
|
||||
};
|
||||
|
||||
graph.nodes[ipAdapterNode.id] = ipAdapterNode;
|
||||
|
||||
ipAdapterMetdata.push(buildIPAdapterMetadata(ipAdapter));
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: ipAdapterNode.id, field: 'ip_adapter' },
|
||||
destination: {
|
||||
node_id: IP_ADAPTER_COLLECT,
|
||||
field: 'item',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
upsertMetadata(graph, { ipAdapters: ipAdapterMetdata });
|
||||
};
|
||||
|
||||
export const addControlLayersToGraph = async (state: RootState, graph: NonNullableGraph, denoiseNodeId: string) => {
|
||||
const { dispatch } = getStore();
|
||||
const isSDXL = state.generation.model?.base === 'sdxl';
|
||||
const layers = state.controlLayers.present.layers
|
||||
// Only support vector mask layers now
|
||||
// TODO: Image masks
|
||||
const mainModel = state.generation.model;
|
||||
assert(mainModel, 'Missing main model when building graph');
|
||||
const isSDXL = mainModel.base === 'sdxl';
|
||||
|
||||
// Add global control adapters
|
||||
const globalControlNets = state.controlLayers.present.layers
|
||||
// Must be a CA layer
|
||||
.filter(isControlAdapterLayer)
|
||||
// Must be enabled
|
||||
.filter((l) => l.isEnabled)
|
||||
// We want the CAs themselves
|
||||
.map((l) => l.controlAdapter)
|
||||
// Must be a ControlNet
|
||||
.filter(isControlNetConfig)
|
||||
.filter((ca) => {
|
||||
const hasModel = Boolean(ca.model);
|
||||
const modelMatchesBase = ca.model?.base === mainModel.base;
|
||||
const hasControlImage = ca.image || (ca.processedImage && ca.processorConfig);
|
||||
return hasModel && modelMatchesBase && hasControlImage;
|
||||
});
|
||||
addGlobalControlNetsToGraph(globalControlNets, graph, denoiseNodeId);
|
||||
|
||||
const globalT2IAdapters = state.controlLayers.present.layers
|
||||
// Must be a CA layer
|
||||
.filter(isControlAdapterLayer)
|
||||
// Must be enabled
|
||||
.filter((l) => l.isEnabled)
|
||||
// We want the CAs themselves
|
||||
.map((l) => l.controlAdapter)
|
||||
// Must have a ControlNet CA
|
||||
.filter(isT2IAdapterConfig)
|
||||
.filter((ca) => {
|
||||
const hasModel = Boolean(ca.model);
|
||||
const modelMatchesBase = ca.model?.base === mainModel.base;
|
||||
const hasControlImage = ca.image || (ca.processedImage && ca.processorConfig);
|
||||
return hasModel && modelMatchesBase && hasControlImage;
|
||||
});
|
||||
addGlobalT2IAdaptersToGraph(globalT2IAdapters, graph, denoiseNodeId);
|
||||
|
||||
const globalIPAdapters = state.controlLayers.present.layers
|
||||
// Must be an IP Adapter layer
|
||||
.filter(isIPAdapterLayer)
|
||||
// Must be enabled
|
||||
.filter((l) => l.isEnabled)
|
||||
// We want the IP Adapters themselves
|
||||
.map((l) => l.ipAdapter)
|
||||
.filter((ca) => {
|
||||
const hasModel = Boolean(ca.model);
|
||||
const modelMatchesBase = ca.model?.base === mainModel.base;
|
||||
const hasControlImage = Boolean(ca.image);
|
||||
return hasModel && modelMatchesBase && hasControlImage;
|
||||
});
|
||||
addGlobalIPAdaptersToGraph(globalIPAdapters, graph, denoiseNodeId);
|
||||
|
||||
const rgLayers = state.controlLayers.present.layers
|
||||
// Only RG layers are get masks
|
||||
.filter(isRegionalGuidanceLayer)
|
||||
// Only visible layers are rendered on the canvas
|
||||
.filter((l) => l.isEnabled)
|
||||
// Only layers with prompts get added to the graph
|
||||
.filter((l) => {
|
||||
const hasTextPrompt = Boolean(l.positivePrompt || l.negativePrompt);
|
||||
const hasIPAdapter = l.ipAdapterIds.length !== 0;
|
||||
const hasIPAdapter = l.ipAdapters.length !== 0;
|
||||
return hasTextPrompt || hasIPAdapter;
|
||||
});
|
||||
|
||||
// Collect all IP Adapter ids for IP adapter layers
|
||||
const layerIPAdapterIds = layers.flatMap((l) => l.ipAdapterIds);
|
||||
|
||||
const regionalIPAdapters = selectAllIPAdapters(state.controlAdapters).filter(
|
||||
({ id, model, controlImage, isEnabled }) => {
|
||||
const hasModel = Boolean(model);
|
||||
const doesBaseMatch = model?.base === state.generation.model?.base;
|
||||
const hasControlImage = controlImage;
|
||||
const isRegional = layerIPAdapterIds.includes(id);
|
||||
return isEnabled && hasModel && doesBaseMatch && hasControlImage && isRegional;
|
||||
}
|
||||
);
|
||||
|
||||
const layerIds = layers.map((l) => l.id);
|
||||
const layerIds = rgLayers.map((l) => l.id);
|
||||
const blobs = await getRegionalPromptLayerBlobs(layerIds);
|
||||
assert(size(blobs) === size(layerIds), 'Mismatch between layer IDs and blobs');
|
||||
|
||||
@ -118,27 +470,11 @@ export const addControlLayersToGraph = async (state: RootState, graph: NonNullab
|
||||
},
|
||||
});
|
||||
|
||||
if (!graph.nodes[IP_ADAPTER_COLLECT] && regionalIPAdapters.length > 0) {
|
||||
const ipAdapterCollectNode: CollectInvocation = {
|
||||
id: IP_ADAPTER_COLLECT,
|
||||
type: 'collect',
|
||||
is_intermediate: true,
|
||||
};
|
||||
graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode;
|
||||
graph.edges.push({
|
||||
source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' },
|
||||
destination: {
|
||||
node_id: denoiseNodeId,
|
||||
field: 'ip_adapter',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Upload the blobs to the backend, add each to graph
|
||||
// TODO: Store the uploaded image names in redux to reuse them, so long as the layer hasn't otherwise changed. This
|
||||
// would be a great perf win - not only would we skip re-uploading the same image, but we'd be able to use the node
|
||||
// cache (currently, when we re-use the same mask data, since it is a different image, the node cache is not used).
|
||||
for (const layer of layers) {
|
||||
for (const layer of rgLayers) {
|
||||
const blob = blobs[layer.id];
|
||||
assert(blob, `Blob for layer ${layer.id} not found`);
|
||||
|
||||
@ -296,36 +632,32 @@ export const addControlLayersToGraph = async (state: RootState, graph: NonNullab
|
||||
}
|
||||
}
|
||||
|
||||
for (const ipAdapterId of layer.ipAdapterIds) {
|
||||
const ipAdapter = selectAllIPAdapters(state.controlAdapters)
|
||||
.filter(({ id, model, controlImage, isEnabled }) => {
|
||||
const hasModel = Boolean(model);
|
||||
const doesBaseMatch = model?.base === state.generation.model?.base;
|
||||
const hasControlImage = controlImage;
|
||||
const isRegional = layers.some((l) => l.ipAdapterIds.includes(id));
|
||||
return isEnabled && hasModel && doesBaseMatch && hasControlImage && isRegional;
|
||||
})
|
||||
.find((ca) => ca.id === ipAdapterId);
|
||||
// TODO(psyche): For some reason, I have to explicitly annotate regionalIPAdapters here. Not sure why.
|
||||
const regionalIPAdapters: IPAdapterConfig[] = layer.ipAdapters.filter((ipAdapter) => {
|
||||
const hasModel = Boolean(ipAdapter.model);
|
||||
const modelMatchesBase = ipAdapter.model?.base === mainModel.base;
|
||||
const hasControlImage = Boolean(ipAdapter.image);
|
||||
return hasModel && modelMatchesBase && hasControlImage;
|
||||
});
|
||||
|
||||
if (!ipAdapter?.model) {
|
||||
return;
|
||||
}
|
||||
const { id, weight, model, clipVisionModel, method, beginStepPct, endStepPct, controlImage } = ipAdapter;
|
||||
|
||||
assert(controlImage, 'IP Adapter image is required');
|
||||
for (const ipAdapter of regionalIPAdapters) {
|
||||
addIPAdapterCollectorSafe(graph, denoiseNodeId);
|
||||
const { id, weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter;
|
||||
assert(model, 'IP Adapter model is required');
|
||||
assert(image, 'IP Adapter image is required');
|
||||
|
||||
const ipAdapterNode: IPAdapterInvocation = {
|
||||
id: `ip_adapter_${id}`,
|
||||
type: 'ip_adapter',
|
||||
is_intermediate: true,
|
||||
weight: weight,
|
||||
method: method,
|
||||
weight,
|
||||
method,
|
||||
ip_adapter_model: model,
|
||||
clip_vision_model: clipVisionModel,
|
||||
begin_step_percent: beginStepPct,
|
||||
end_step_percent: endStepPct,
|
||||
begin_step_percent: beginEndStepPct[0],
|
||||
end_step_percent: beginEndStepPct[1],
|
||||
image: {
|
||||
image_name: controlImage,
|
||||
image_name: image.imageName,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -1,10 +1,8 @@
import type { RootState } from 'app/store/store';
import { selectValidControlNets } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlAdapterProcessorType, ControlNetConfig } from 'features/controlAdapters/store/types';
import { isControlAdapterLayer } from 'features/controlLayers/store/controlLayersSlice';
import type { ImageField } from 'features/nodes/types/common';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { differenceWith, intersectionWith } from 'lodash-es';
import type {
CollectInvocation,
ControlNetInvocation,
@ -17,9 +15,13 @@ import { assert } from 'tsafe';
import { CONTROL_NET_COLLECT } from './constants';
import { upsertMetadata } from './metadata';

const getControlNets = (state: RootState) => {
// Start with the valid controlnets
const validControlNets = selectValidControlNets(state.controlAdapters).filter(
export const addControlNetToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
const controlNetMetadata: CoreMetadataInvocation['controlnets'] = [];
const controlNets = selectValidControlNets(state.controlAdapters).filter(
({ model, processedControlImage, processorType, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.generation.model?.base;
@ -29,35 +31,9 @@ const getControlNets = (state: RootState) => {
}
);

// txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters
// accordion. We need to filter the list of valid T2I adapters according to the tab.
// The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);

if (activeTabName === 'txt2img') {
// Add only the cnets that are used in control layers
// Collect all ControlNet ids for enabled ControlNet layers
const layerControlNetIds = state.controlLayers.present.layers
.filter(isControlAdapterLayer)
.filter((l) => l.isEnabled)
.map((l) => l.controlNetId);
return intersectionWith(validControlNets, layerControlNetIds, (a, b) => a.id === b);
} else {
// Else, we want to exclude the cnets that are used in control layers
// Collect all ControlNet ids for all ControlNet layers
const layerControlNetIds = state.controlLayers.present.layers
.filter(isControlAdapterLayer)
.map((l) => l.controlNetId);
return differenceWith(validControlNets, layerControlNetIds, (a, b) => a.id === b);
}
};

export const addControlNetToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
const controlNets = getControlNets(state);
const controlNetMetadata: CoreMetadataInvocation['controlnets'] = [];
assert(activeTabName !== 'txt2img', 'Tried to use addControlNetToLinearGraph on txt2img tab');

if (controlNets.length) {
// Even though denoise_latents' control input is collection or scalar, keep it simple and always use a collect
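For context, a hedged sketch of the "always use a collect" pattern the comment above refers to: one collect node gathers every per-adapter output and feeds the denoise node's control input as a collection. The node and edge shapes below are illustrative, not the repository's exact graph API.

// Sketch: a single collect node fans many control nodes into one collection input.
type Edge = { source: { node_id: string; field: string }; destination: { node_id: string; field: string } };

const collectNodeId = 'control_net_collect'; // mirrors CONTROL_NET_COLLECT in the diff
const edges: Edge[] = [];

// Each per-adapter node feeds the collect node's `item` input...
for (const controlNetNodeId of ['control_net_1', 'control_net_2']) {
  edges.push({
    source: { node_id: controlNetNodeId, field: 'control' },
    destination: { node_id: collectNodeId, field: 'item' },
  });
}

// ...and the collect node's `collection` output feeds the denoise node exactly once.
edges.push({
  source: { node_id: collectNodeId, field: 'collection' },
  destination: { node_id: 'denoise_latents', field: 'control' },
});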
@ -1,10 +1,8 @@
import type { RootState } from 'app/store/store';
import { selectValidIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { IPAdapterConfig } from 'features/controlAdapters/store/types';
import { isIPAdapterLayer, isRegionalGuidanceLayer } from 'features/controlLayers/store/controlLayersSlice';
import type { ImageField } from 'features/nodes/types/common';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { differenceWith, intersectionWith } from 'lodash-es';
import type {
CollectInvocation,
CoreMetadataInvocation,
@ -17,48 +15,21 @@ import { assert } from 'tsafe';
import { IP_ADAPTER_COLLECT } from './constants';
import { upsertMetadata } from './metadata';

const getIPAdapters = (state: RootState) => {
// Start with the valid IP adapters
const validIPAdapters = selectValidIPAdapters(state.controlAdapters).filter(({ model, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.generation.model?.base;
const hasControlImage = controlImage;
return isEnabled && hasModel && doesBaseMatch && hasControlImage;
});

// Masked IP adapters are handled in the graph helper for regional control - skip them here
const maskedIPAdapterIds = state.controlLayers.present.layers
.filter(isRegionalGuidanceLayer)
.map((l) => l.ipAdapterIds)
.flat();
const nonMaskedIPAdapters = differenceWith(validIPAdapters, maskedIPAdapterIds, (a, b) => a.id === b);

// txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters
// accordion. We need to filter the list of valid IP adapters according to the tab.
const activeTabName = activeTabNameSelector(state);

if (activeTabName === 'txt2img') {
// If we are on the t2i tab, we only want to add the IP adapters that are used in unmasked IP Adapter layers
// Collect all IP Adapter ids for enabled IP adapter layers
const layerIPAdapterIds = state.controlLayers.present.layers
.filter(isIPAdapterLayer)
.filter((l) => l.isEnabled)
.map((l) => l.ipAdapterId);
return intersectionWith(nonMaskedIPAdapters, layerIPAdapterIds, (a, b) => a.id === b);
} else {
// Else, we want to exclude the IP adapters that are used in IP Adapter layers
// Collect all IP Adapter ids for enabled IP adapter layers
const layerIPAdapterIds = state.controlLayers.present.layers.filter(isIPAdapterLayer).map((l) => l.ipAdapterId);
return differenceWith(nonMaskedIPAdapters, layerIPAdapterIds, (a, b) => a.id === b);
}
};

export const addIPAdapterToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
const ipAdapters = getIPAdapters(state);
// The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);
assert(activeTabName !== 'txt2img', 'Tried to use addT2IAdaptersToLinearGraph on txt2img tab');

const ipAdapters = selectValidIPAdapters(state.controlAdapters).filter(({ model, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.generation.model?.base;
const hasControlImage = controlImage;
return isEnabled && hasModel && doesBaseMatch && hasControlImage;
});

if (ipAdapters.length) {
// Even though denoise_latents' ip adapter input is collection or scalar, keep it simple and always use a collect
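Aside: `assert` from 'tsafe', as used in these helpers, appears to be an assertion function, so a falsy value throws at runtime and the checked value is narrowed afterwards. A small sketch, assuming that signature and with a made-up config shape:

import { assert } from 'tsafe';

// Hypothetical config shape, for illustration only.
type IPAdapterLikeConfig = { model: { key: string } | null; controlImage: string | null };

const toImageField = (ca: IPAdapterLikeConfig) => {
  assert(ca.model, 'IP Adapter model is required'); // throws if null
  assert(ca.controlImage, 'IP Adapter image is required');
  // After the asserts, TypeScript can treat model and controlImage as non-null.
  return { image_name: ca.controlImage, model_key: ca.model.key };
};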
@ -1,10 +1,8 @@
import type { RootState } from 'app/store/store';
import { selectValidT2IAdapters } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlAdapterProcessorType, T2IAdapterConfig } from 'features/controlAdapters/store/types';
import { isControlAdapterLayer } from 'features/controlLayers/store/controlLayersSlice';
import type { ImageField } from 'features/nodes/types/common';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { differenceWith, intersectionWith } from 'lodash-es';
import type {
CollectInvocation,
CoreMetadataInvocation,
@ -17,9 +15,16 @@ import { assert } from 'tsafe';
import { T2I_ADAPTER_COLLECT } from './constants';
import { upsertMetadata } from './metadata';

const getT2IAdapters = (state: RootState) => {
// Start with the valid controlnets
const validT2IAdapters = selectValidT2IAdapters(state.controlAdapters).filter(
export const addT2IAdaptersToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
// The txt2img tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);
assert(activeTabName !== 'txt2img', 'Tried to use addT2IAdaptersToLinearGraph on txt2img tab');

const t2iAdapters = selectValidT2IAdapters(state.controlAdapters).filter(
({ model, processedControlImage, processorType, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.generation.model?.base;
@ -29,34 +34,6 @@ const getT2IAdapters = (state: RootState) => {
}
);

// txt2img tab has special handling - it uses layers exclusively, while the other tabs use the older control adapters
// accordion. We need to filter the list of valid T2I adapters according to the tab.
const activeTabName = activeTabNameSelector(state);

if (activeTabName === 'txt2img') {
// Add only the T2Is that are used in control layers
// Collect all ids for enabled control adapter layers
const layerControlAdapterIds = state.controlLayers.present.layers
.filter(isControlAdapterLayer)
.filter((l) => l.isEnabled)
.map((l) => l.controlNetId);
return intersectionWith(validT2IAdapters, layerControlAdapterIds, (a, b) => a.id === b);
} else {
// Else, we want to exclude the T2Is that are used in control layers
const layerControlAdapterIds = state.controlLayers.present.layers
.filter(isControlAdapterLayer)
.map((l) => l.controlNetId);
return differenceWith(validT2IAdapters, layerControlAdapterIds, (a, b) => a.id === b);
}
};

export const addT2IAdaptersToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
const t2iAdapters = getT2IAdapters(state);

if (t2iAdapters.length) {
// Even though denoise_latents' t2i adapter input is collection or scalar, keep it simple and always use a collect
const t2iAdapterCollectNode: CollectInvocation = {
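The helpers removed above leaned on lodash-es comparator variants to pair a list of adapter objects against a plain list of layer ids. A small sketch of that idea (the adapter shape is made up for illustration):

import { differenceWith, intersectionWith } from 'lodash-es';

const adapters = [{ id: 'a' }, { id: 'b' }, { id: 'c' }]; // hypothetical adapters
const layerIds = ['b', 'c']; // ids referenced by control layers

// Keep only adapters whose id appears in layerIds.
const usedInLayers = intersectionWith(adapters, layerIds, (a, b) => a.id === b); // [{ id: 'b' }, { id: 'c' }]

// Keep only adapters whose id does NOT appear in layerIds.
const notInLayers = differenceWith(adapters, layerIds, (a, b) => a.id === b); // [{ id: 'a' }]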
@ -4,13 +4,10 @@ import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetch
import { addControlLayersToGraph } from 'features/nodes/util/graph/addControlLayersToGraph';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -264,14 +261,6 @@ export const buildLinearSDXLTextToImageGraph = async (state: RootState): Promise
// add LoRA support
await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);

// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

// add IP Adapter
await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

await addControlLayersToGraph(state, graph, SDXL_DENOISE_LATENTS);

// NSFW & watermark - must be last thing added to graph
@ -5,13 +5,10 @@ import { addControlLayersToGraph } from 'features/nodes/util/graph/addControlLay
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addHrfToGraph } from './addHrfToGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -246,14 +243,6 @@ export const buildLinearTextToImageGraph = async (state: RootState): Promise<Non
// add LoRA support
await addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);

// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);

// add IP Adapter
await addIPAdapterToLinearGraph(state, graph, DENOISE_LATENTS);

await addT2IAdaptersToLinearGraph(state, graph, DENOISE_LATENTS);

await addControlLayersToGraph(state, graph, DENOISE_LATENTS);

// High resolution fix.
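After this change, both linear text-to-image builders hand all control adapter wiring to the single Control Layers helper. An abbreviated sketch of the resulting call order, using only the calls visible in the diff above (the rest of the builder body is elided):

// Abbreviated sketch, not the full builder body:
await addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// ControlNet, IP Adapter and T2I Adapter wiring now happens in one place:
await addControlLayersToGraph(state, graph, DENOISE_LATENTS);
// High resolution fix, NSFW checker and watermarker follow, unchanged.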
@ -75,4 +75,5 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [
{ value: 'euler_a', label: 'Euler Ancestral' },
{ value: 'kdpm_2_a', label: 'KDPM 2 Ancestral' },
{ value: 'lcm', label: 'LCM' },
{ value: 'tcd', label: 'TCD' },
].sort((a, b) => a.label.localeCompare(b.label));
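The option list is sorted once at module load by label, so the new entry slots into alphabetical order regardless of where it is declared. A tiny sketch with the two newest entries (labels taken from the diff):

// Sketch: localeCompare sorts by label, so 'LCM' precedes 'TCD' regardless of insertion order.
const options = [
  { value: 'tcd', label: 'TCD' },
  { value: 'lcm', label: 'LCM' },
].sort((a, b) => a.label.localeCompare(b.label));
// options[0].label === 'LCM', options[1].label === 'TCD'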
@ -13,66 +13,52 @@ import {
selectValidIPAdapters,
selectValidT2IAdapters,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { selectAllControlAdapterIds, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { Fragment, memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

const selector = createMemoizedSelector(
[selectControlAdaptersSlice, selectControlLayersSlice],
(controlAdapters, controlLayers) => {
const badges: string[] = [];
let isError = false;
const selector = createMemoizedSelector([selectControlAdaptersSlice], (controlAdapters) => {
const badges: string[] = [];
let isError = false;

const controlLayersAdapterIds = selectAllControlAdapterIds(controlLayers.present);
const enabledNonRegionalIPAdapterCount = selectAllIPAdapters(controlAdapters).filter((ca) => ca.isEnabled).length;

const enabledNonRegionalIPAdapterCount = selectAllIPAdapters(controlAdapters)
.filter((ca) => !controlLayersAdapterIds.includes(ca.id))
.filter((ca) => ca.isEnabled).length;

const validIPAdapterCount = selectValidIPAdapters(controlAdapters).length;
if (enabledNonRegionalIPAdapterCount > 0) {
badges.push(`${enabledNonRegionalIPAdapterCount} IP`);
}
if (enabledNonRegionalIPAdapterCount > validIPAdapterCount) {
isError = true;
}

const enabledControlNetCount = selectAllControlNets(controlAdapters)
.filter((ca) => !controlLayersAdapterIds.includes(ca.id))
.filter((ca) => ca.isEnabled).length;
const validControlNetCount = selectValidControlNets(controlAdapters).length;
if (enabledControlNetCount > 0) {
badges.push(`${enabledControlNetCount} ControlNet`);
}
if (enabledControlNetCount > validControlNetCount) {
isError = true;
}

const enabledT2IAdapterCount = selectAllT2IAdapters(controlAdapters)
.filter((ca) => !controlLayersAdapterIds.includes(ca.id))
.filter((ca) => ca.isEnabled).length;
const validT2IAdapterCount = selectValidT2IAdapters(controlAdapters).length;
if (enabledT2IAdapterCount > 0) {
badges.push(`${enabledT2IAdapterCount} T2I`);
}
if (enabledT2IAdapterCount > validT2IAdapterCount) {
isError = true;
}

const controlAdapterIds = selectControlAdapterIds(controlAdapters).filter(
(id) => !controlLayersAdapterIds.includes(id)
);

return {
controlAdapterIds,
badges,
isError, // TODO: Add some visual indicator that the control adapters are in an error state
};
const validIPAdapterCount = selectValidIPAdapters(controlAdapters).length;
if (enabledNonRegionalIPAdapterCount > 0) {
badges.push(`${enabledNonRegionalIPAdapterCount} IP`);
}
);
if (enabledNonRegionalIPAdapterCount > validIPAdapterCount) {
isError = true;
}

const enabledControlNetCount = selectAllControlNets(controlAdapters).filter((ca) => ca.isEnabled).length;
const validControlNetCount = selectValidControlNets(controlAdapters).length;
if (enabledControlNetCount > 0) {
badges.push(`${enabledControlNetCount} ControlNet`);
}
if (enabledControlNetCount > validControlNetCount) {
isError = true;
}

const enabledT2IAdapterCount = selectAllT2IAdapters(controlAdapters).filter((ca) => ca.isEnabled).length;
const validT2IAdapterCount = selectValidT2IAdapters(controlAdapters).length;
if (enabledT2IAdapterCount > 0) {
badges.push(`${enabledT2IAdapterCount} T2I`);
}
if (enabledT2IAdapterCount > validT2IAdapterCount) {
isError = true;
}

const controlAdapterIds = selectControlAdapterIds(controlAdapters);

return {
controlAdapterIds,
badges,
isError, // TODO: Add some visual indicator that the control adapters are in an error state
};
});

export const ControlSettingsAccordion: React.FC = memo(() => {
const { t } = useTranslation();
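The selector above now counts adapters straight from the control adapters slice, without cross-referencing control layer ids. A compact, hedged restatement of the badge/error rule as a standalone function (names and values are illustrative):

// Sketch: a badge is shown when any adapter of a kind is enabled; it is an error
// state when more adapters are enabled than are actually valid (e.g. missing model or image).
const summarize = (label: string, enabledCount: number, validCount: number) => ({
  badge: enabledCount > 0 ? `${enabledCount} ${label}` : null,
  isError: enabledCount > validCount,
});

const ip = summarize('IP', 2, 1); // { badge: '2 IP', isError: true }
const cn = summarize('ControlNet', 0, 0); // { badge: null, isError: false }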
@ -16,6 +16,9 @@ export const ImageSizeCanvas = memo(() => {

const onChangeWidth = useCallback(
(width: number) => {
if (width === 0) {
return;
}
dispatch(setBoundingBoxDimensions({ width }, optimalDimension));
},
[dispatch, optimalDimension]
@ -23,6 +26,9 @@ export const ImageSizeCanvas = memo(() => {

const onChangeHeight = useCallback(
(height: number) => {
if (height === 0) {
return;
}
dispatch(setBoundingBoxDimensions({ height }, optimalDimension));
},
[dispatch, optimalDimension]
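The new early return presumably keeps a transiently-empty number input (which reports 0) from dispatching a zero-sized dimension. A minimal sketch of the guard pattern, independent of the component:

// Sketch: ignore 0 so clearing the field before typing a new value does not dispatch.
const makeOnChangeDimension = (dispatchDimension: (value: number) => void) => (value: number) => {
  if (value === 0) {
    return;
  }
  dispatchDimension(value);
};

const onChangeWidth = makeOnChangeDimension((width) => console.log('width ->', width));
onChangeWidth(0); // ignored
onChangeWidth(768); // logs 'width -> 768'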
@ -18,6 +18,9 @@ export const ImageSizeLinear = memo(() => {

const onChangeWidth = useCallback(
(width: number) => {
if (width === 0) {
return;
}
dispatch(widthChanged({ width }));
},
[dispatch]
@ -25,6 +28,9 @@ export const ImageSizeLinear = memo(() => {

const onChangeHeight = useCallback(
(height: number) => {
if (height === 0) {
return;
}
dispatch(heightChanged({ height }));
},
[dispatch]
@ -4,6 +4,7 @@ import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/
import type { AnyModelConfig } from 'services/api/types';
import {
isControlNetModelConfig,
isControlNetOrT2IAdapterModelConfig,
isIPAdapterModelConfig,
isLoRAModelConfig,
isNonRefinerMainModelConfig,
@ -35,6 +36,7 @@ export const useNonSDXLMainModels = buildModelsHook(isNonSDXLMainModelConfig);
export const useRefinerModels = buildModelsHook(isRefinerMainModelModelConfig);
export const useSDXLModels = buildModelsHook(isSDXLMainModelModelConfig);
export const useLoRAModels = buildModelsHook(isLoRAModelConfig);
export const useControlNetAndT2IAdapterModels = buildModelsHook(isControlNetOrT2IAdapterModelConfig);
export const useControlNetModels = buildModelsHook(isControlNetModelConfig);
export const useT2IAdapterModels = buildModelsHook(isT2IAdapterModelConfig);
export const useIPAdapterModels = buildModelsHook(isIPAdapterModelConfig);
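buildModelsHook (defined earlier in this file) pairs each hook with a model-config type guard. A hedged sketch of the underlying filter-by-type-guard idea, using made-up config types rather than the repository's real ones:

// Sketch only: narrow a mixed model-config list to one kind via a type guard.
type ControlNetConfigLike = { type: 'controlnet'; name: string };
type T2IAdapterConfigLike = { type: 't2i_adapter'; name: string };
type AnyConfigLike = ControlNetConfigLike | T2IAdapterConfigLike;

const isControlNetLike = (c: AnyConfigLike): c is ControlNetConfigLike => c.type === 'controlnet';

const configs: AnyConfigLike[] = [
  { type: 'controlnet', name: 'canny' },
  { type: 't2i_adapter', name: 'depth' },
];

const controlNets = configs.filter(isControlNetLike); // typed as ControlNetConfigLike[]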
File diff suppressed because one or more lines are too long
@ -177,6 +177,22 @@ type ControlAdapterAction = {
id: string;
};

export type CALayerImagePostUploadAction = {
type: 'SET_CA_LAYER_IMAGE';
layerId: string;
};

export type IPALayerImagePostUploadAction = {
type: 'SET_IPA_LAYER_IMAGE';
layerId: string;
};

export type RGLayerIPAdapterImagePostUploadAction = {
type: 'SET_RG_LAYER_IP_ADAPTER_IMAGE';
layerId: string;
ipAdapterId: string;
};

type InitialImageAction = {
type: 'SET_INITIAL_IMAGE';
};
@ -206,4 +222,7 @@ export type PostUploadAction =
| NodesAction
| CanvasInitialImageAction
| ToastAction
| AddToBatchAction;
| AddToBatchAction
| CALayerImagePostUploadAction
| IPALayerImagePostUploadAction
| RGLayerIPAdapterImagePostUploadAction;
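The three new members extend the PostUploadAction discriminated union, so consumers narrow on the `type` tag. A small sketch using only the shapes added above:

// Sketch: narrowing the new upload actions by their literal `type` tag.
type NewUploadAction =
  | { type: 'SET_CA_LAYER_IMAGE'; layerId: string }
  | { type: 'SET_IPA_LAYER_IMAGE'; layerId: string }
  | { type: 'SET_RG_LAYER_IP_ADAPTER_IMAGE'; layerId: string; ipAdapterId: string };

const describe = (action: NewUploadAction): string => {
  if (action.type === 'SET_RG_LAYER_IP_ADAPTER_IMAGE') {
    // Only this branch has ipAdapterId available.
    return `layer ${action.layerId}, ip adapter ${action.ipAdapterId}`;
  }
  return `layer ${action.layerId}`;
};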