diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 36157e195a..cc1efa3a68 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -375,6 +375,9 @@ class ImageResizeInvocation(BaseInvocation): width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)") height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)") resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") + metadata: Optional[CoreMetadata] = InputField( + default=None, description=FieldDescriptions.core_metadata, ui_hidden=True + ) def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get_pil_image(self.image.image_name) @@ -393,6 +396,7 @@ class ImageResizeInvocation(BaseInvocation): node_id=self.id, session_id=context.graph_execution_state_id, is_intermediate=self.is_intermediate, + metadata=self.metadata.dict() if self.metadata else None, ) return ImageOutput( diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 314301663b..80988f3c71 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -21,6 +21,8 @@ from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.invocations.primitives import ( + DenoiseMaskField, + DenoiseMaskOutput, ImageField, ImageOutput, LatentsField, @@ -31,8 +33,9 @@ from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.model_management.models import ModelType, SilenceWarnings -from ...backend.model_management.models import BaseModelType from ...backend.model_management.lora import ModelPatcher +from ...backend.model_management.seamless import set_seamless +from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( ConditioningData, @@ -44,16 +47,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP from ...backend.util.devices import choose_precision, choose_torch_device from ..models.image import ImageCategory, ResourceOrigin -from .baseinvocation import ( - BaseInvocation, - FieldDescriptions, - Input, - InputField, - InvocationContext, - UIType, - tags, - title, -) +from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title from .compel import ConditioningField from .controlnet_image_processors import ControlField from .model import ModelInfo, UNetField, VaeField @@ -64,6 +58,72 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device()) SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))] +@title("Create Denoise Mask") +@tags("mask", "denoise") +class CreateDenoiseMaskInvocation(BaseInvocation): + """Creates mask for denoising model run.""" + + # Metadata + type: Literal["create_denoise_mask"] = "create_denoise_mask" + + # Inputs + vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0) + image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) + mask: ImageField = InputField(description="The mask to use 
when denoising", ui_order=2) + tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) + fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4) + + def prep_mask_tensor(self, mask_image): + if mask_image.mode != "L": + mask_image = mask_image.convert("L") + mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + if mask_tensor.dim() == 3: + mask_tensor = mask_tensor.unsqueeze(0) + # if shape is not None: + # mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR) + return mask_tensor + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> DenoiseMaskOutput: + if self.image is not None: + image = context.services.images.get_pil_image(self.image.image_name) + image = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image.dim() == 3: + image = image.unsqueeze(0) + else: + image = None + + mask = self.prep_mask_tensor( + context.services.images.get_pil_image(self.mask.image_name), + ) + + if image is not None: + vae_info = context.services.model_manager.get_model( + **self.vae.vae.dict(), + context=context, + ) + + img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) + masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0) + # TODO: + masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone()) + + masked_latents_name = f"{context.graph_execution_state_id}__{self.id}_masked_latents" + context.services.latents.save(masked_latents_name, masked_latents) + else: + masked_latents_name = None + + mask_name = f"{context.graph_execution_state_id}__{self.id}_mask" + context.services.latents.save(mask_name, mask) + + return DenoiseMaskOutput( + denoise_mask=DenoiseMaskField( + mask_name=mask_name, + masked_latents_name=masked_latents_name, + ), + ) + + def get_scheduler( context: InvocationContext, scheduler_info: ModelInfo, @@ -126,10 +186,8 @@ class DenoiseLatentsInvocation(BaseInvocation): control: Union[ControlField, list[ControlField]] = InputField( default=None, description=FieldDescriptions.control, input=Input.Connection, ui_order=5 ) - latents: Optional[LatentsField] = InputField( - description=FieldDescriptions.latents, input=Input.Connection, ui_order=4 - ) - mask: Optional[ImageField] = InputField( + latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection) + denoise_mask: Optional[DenoiseMaskField] = InputField( default=None, description=FieldDescriptions.mask, ) @@ -342,19 +400,18 @@ class DenoiseLatentsInvocation(BaseInvocation): return num_inference_steps, timesteps, init_timestep - def prep_mask_tensor(self, mask, context, lantents): - if mask is None: - return None + def prep_inpaint_mask(self, context, latents): + if self.denoise_mask is None: + return None, None - mask_image = context.services.images.get_pil_image(mask.image_name) - if mask_image.mode != "L": - # FIXME: why do we get passed an RGB image here? We can only use single-channel. 
- mask_image = mask_image.convert("L") - mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) - if mask_tensor.dim() == 3: - mask_tensor = mask_tensor.unsqueeze(0) - mask_tensor = tv_resize(mask_tensor, lantents.shape[-2:], T.InterpolationMode.BILINEAR) - return 1 - mask_tensor + mask = context.services.latents.get(self.denoise_mask.mask_name) + mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) + if self.denoise_mask.masked_latents_name is not None: + masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name) + else: + masked_latents = None + + return 1 - mask, masked_latents @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: @@ -375,7 +432,7 @@ class DenoiseLatentsInvocation(BaseInvocation): if seed is None: seed = 0 - mask = self.prep_mask_tensor(self.mask, context, latents) + mask, masked_latents = self.prep_inpaint_mask(context, latents) # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -400,12 +457,14 @@ class DenoiseLatentsInvocation(BaseInvocation): ) with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet( unet_info.context.model, _lora_loader() - ), unet_info as unet: + ), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet: latents = latents.to(device=unet.device, dtype=unet.dtype) if noise is not None: noise = noise.to(device=unet.device, dtype=unet.dtype) if mask is not None: mask = mask.to(device=unet.device, dtype=unet.dtype) + if masked_latents is not None: + masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype) scheduler = get_scheduler( context=context, @@ -442,6 +501,7 @@ class DenoiseLatentsInvocation(BaseInvocation): noise=noise, seed=seed, mask=mask, + masked_latents=masked_latents, num_inference_steps=num_inference_steps, conditioning_data=conditioning_data, control_data=control_data, # list[ControlNetData] @@ -490,7 +550,7 @@ class LatentsToImageInvocation(BaseInvocation): context=context, ) - with vae_info as vae: + with set_seamless(vae_info.context.model, self.vae.seamless_axes), vae_info as vae: latents = latents.to(vae.device) if self.fp32: vae.to(dtype=torch.float32) @@ -663,26 +723,11 @@ class ImageToLatentsInvocation(BaseInvocation): tiled: bool = InputField(default=False, description=FieldDescriptions.tiled) fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) - @torch.no_grad() - def invoke(self, context: InvocationContext) -> LatentsOutput: - # image = context.services.images.get( - # self.image.image_type, self.image.image_name - # ) - image = context.services.images.get_pil_image(self.image.image_name) - - # vae_info = context.services.model_manager.get_model(**self.vae.vae.dict()) - vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), - context=context, - ) - - image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) - if image_tensor.dim() == 3: - image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w") - + @staticmethod + def vae_encode(vae_info, upcast, tiled, image_tensor): with vae_info as vae: orig_dtype = vae.dtype - if self.fp32: + if upcast: vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( @@ -707,7 +752,7 @@ class ImageToLatentsInvocation(BaseInvocation): vae.to(dtype=torch.float16) # latents = latents.half() - if self.tiled: + if tiled: 
vae.enable_tiling() + else: + vae.disable_tiling() @@ -721,6 +766,23 @@ class ImageToLatentsInvocation(BaseInvocation): latents = vae.config.scaling_factor * latents latents = latents.to(dtype=orig_dtype) + return latents + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> LatentsOutput: + image = context.services.images.get_pil_image(self.image.image_name) + + vae_info = context.services.model_manager.get_model( + **self.vae.vae.dict(), + context=context, + ) + + image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image_tensor.dim() == 3: + image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w") + + latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor) + name = f"{context.graph_execution_state_id}__{self.id}" latents = latents.to("cpu") context.services.latents.save(name, latents) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 3cae4b3383..31d04cfd37 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -8,8 +8,8 @@ from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, FieldDescriptions, - InputField, Input, + InputField, InvocationContext, OutputField, UIType, @@ -33,6 +33,7 @@ class UNetField(BaseModel): unet: ModelInfo = Field(description="Info to load unet submodel") scheduler: ModelInfo = Field(description="Info to load scheduler submodel") loras: List[LoraInfo] = Field(description="Loras to apply on model loading") + seamless_axes: List[str] = Field(default_factory=list, description='Axes ("x" and "y") to which seamless tiling is applied') class ClipField(BaseModel): @@ -45,6 +46,7 @@ class VaeField(BaseModel): # TODO: better naming? vae: ModelInfo = Field(description="Info to load vae submodel") + seamless_axes: List[str] = Field(default_factory=list, description='Axes ("x" and "y") to which seamless tiling is applied') class ModelLoaderOutput(BaseInvocationOutput): @@ -388,3 +390,50 @@ class VaeLoaderInvocation(BaseInvocation): ) ) ) + + +class SeamlessModeOutput(BaseInvocationOutput): + """Seamless mode output: UNet and VAE with the seamless axes applied""" + + type: Literal["seamless_output"] = "seamless_output" + + # Outputs + unet: Optional[UNetField] = OutputField(description=FieldDescriptions.unet, title="UNet") + vae: Optional[VaeField] = OutputField(description=FieldDescriptions.vae, title="VAE") + + +@title("Seamless") +@tags("seamless", "model") +class SeamlessModeInvocation(BaseInvocation): + """Applies the seamless transformation to the model's UNet and VAE.""" + + type: Literal["seamless"] = "seamless" + + # Inputs + unet: Optional[UNetField] = InputField( + default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet" + ) + vae: Optional[VaeField] = InputField( + default=None, description=FieldDescriptions.vae_model, input=Input.Connection, title="VAE" + ) + seamless_y: bool = InputField(default=True, input=Input.Any, description="Specify whether the Y axis is seamless") + seamless_x: bool = InputField(default=True, input=Input.Any, description="Specify whether the X axis is seamless") + + def invoke(self, context: InvocationContext) -> SeamlessModeOutput: + # Conditionally append 'x' and 'y' based on seamless_x and seamless_y + unet = copy.deepcopy(self.unet) + vae = copy.deepcopy(self.vae) + + seamless_axes_list = [] + + if self.seamless_x: + seamless_axes_list.append("x") + if self.seamless_y: + seamless_axes_list.append("y") + + if unet is not None: + unet.seamless_axes = seamless_axes_list + if vae is not None: + 
vae.seamless_axes = seamless_axes_list + + return SeamlessModeOutput(unet=unet, vae=vae) diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 607423e570..49cd49d189 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -294,6 +294,25 @@ class ImageCollectionInvocation(BaseInvocation): return ImageCollectionOutput(collection=self.collection) +# endregion + +# region DenoiseMask + + +class DenoiseMaskField(BaseModel): + """An inpaint mask field""" + + mask_name: str = Field(description="The name of the mask image") + masked_latents_name: Optional[str] = Field(description="The name of the masked image latents") + + +class DenoiseMaskOutput(BaseInvocationOutput): + """Base class for nodes that output a single denoise mask""" + + type: Literal["denoise_mask_output"] = "denoise_mask_output" + denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run") + + # endregion # region Latents diff --git a/invokeai/backend/image_util/seamless.py b/invokeai/backend/image_util/seamless.py index 6fb2617901..8a2580bfcc 100644 --- a/invokeai/backend/image_util/seamless.py +++ b/invokeai/backend/image_util/seamless.py @@ -20,7 +20,8 @@ def _conv_forward_asymmetric(self, input, weight, bias): def configure_model_padding(model, seamless, seamless_axes): """ - Modifies the 2D convolution layers to use a circular padding mode based on the `seamless` and `seamless_axes` options. + Modifies the 2D convolution layers to use a circular padding mode based on + the `seamless` and `seamless_axes` options. """ # TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556 for m in model.modules(): diff --git a/invokeai/backend/model_management/seamless.py b/invokeai/backend/model_management/seamless.py new file mode 100644 index 0000000000..54885769ad --- /dev/null +++ b/invokeai/backend/model_management/seamless.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +from contextlib import contextmanager +from typing import List, Union + +import torch.nn as nn +from diffusers.models import AutoencoderKL, UNet2DConditionModel + + +def _conv_forward_asymmetric(self, input, weight, bias): + """ + Patch for Conv2d._conv_forward that supports asymmetric padding + """ + working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"]) + working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"]) + return nn.functional.conv2d( + working, + weight, + bias, + self.stride, + nn.modules.utils._pair(0), + self.dilation, + self.groups, + ) + + +@contextmanager +def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]): + try: + to_restore = [] + + for m_name, m in model.named_modules(): + if isinstance(model, UNet2DConditionModel): + if ".attentions." in m_name: + continue + + if ".resnets." in m_name: + if ".conv2" in m_name: + continue + if ".conv_shortcut" in m_name: + continue + + """ + if isinstance(model, UNet2DConditionModel): + if False and ".upsamplers." in m_name: + continue + + if False and ".downsamplers." in m_name: + continue + + if True and ".resnets." 
in m_name: + if True and ".conv1" in m_name: + if False and "down_blocks" in m_name: + continue + if False and "mid_block" in m_name: + continue + if False and "up_blocks" in m_name: + continue + + if True and ".conv2" in m_name: + continue + + if True and ".conv_shortcut" in m_name: + continue + + if True and ".attentions." in m_name: + continue + + if False and m_name in ["conv_in", "conv_out"]: + continue + """ + + if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): + m.asymmetric_padding_mode = {} + m.asymmetric_padding = {} + m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant" + m.asymmetric_padding["x"] = ( + m._reversed_padding_repeated_twice[0], + m._reversed_padding_repeated_twice[1], + 0, + 0, + ) + m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant" + m.asymmetric_padding["y"] = ( + 0, + 0, + m._reversed_padding_repeated_twice[2], + m._reversed_padding_repeated_twice[3], + ) + + to_restore.append((m, m._conv_forward)) + m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d) + + yield + + finally: + for module, orig_conv_forward in to_restore: + module._conv_forward = orig_conv_forward + if hasattr(module, "asymmetric_padding_mode"): + del module.asymmetric_padding_mode + if hasattr(module, "asymmetric_padding"): + del module.asymmetric_padding diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 63b0c78b51..2d1894c896 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -144,7 +144,7 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool = Tr w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of) transformation = T.Compose( [ - T.Resize((h, w), T.InterpolationMode.LANCZOS), + T.Resize((h, w), T.InterpolationMode.LANCZOS, antialias=True), T.ToTensor(), ] ) @@ -358,6 +358,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): callback: Callable[[PipelineIntermediateState], None] = None, control_data: List[ControlNetData] = None, mask: Optional[torch.Tensor] = None, + masked_latents: Optional[torch.Tensor] = None, seed: Optional[int] = None, ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: if init_timestep.shape[0] == 0: @@ -376,28 +377,28 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): latents = self.scheduler.add_noise(latents, noise, batched_t) if mask is not None: + # if no noise provided, noisify unmasked area based on seed (or 0 as fallback) + if noise is None: + noise = torch.randn( + orig_latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(seed or 0), + ).to(device=orig_latents.device, dtype=orig_latents.dtype) + + latents = self.scheduler.add_noise(latents, noise, batched_t) + latents = torch.lerp( + orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) + ) + if is_inpainting_model(self.unet): - # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint - # (that's why there's a mask!) but it seems to really want that blanked out. - # masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill + if masked_latents is None: + raise Exception("Source image required for inpaint mask when an inpainting model is used!") - # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
- self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents) + self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( + self._unet_forward, mask, masked_latents + ) else: - # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) - if noise is None: - noise = torch.randn( - orig_latents.shape, - dtype=torch.float32, - device="cpu", - generator=torch.Generator(device="cpu").manual_seed(seed or 0), - ).to(device=orig_latents.device, dtype=orig_latents.dtype) - - latents = self.scheduler.add_noise(latents, noise, batched_t) - latents = torch.lerp( - orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) - ) - additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) try: diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py index 3d7f278f86..3d64d8a42c 100644 --- a/invokeai/backend/util/hotfixes.py +++ b/invokeai/backend/util/hotfixes.py @@ -761,3 +761,18 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin): diffusers.ControlNetModel = ControlNetModel diffusers.models.controlnet.ControlNetModel = ControlNetModel + + +# Patch LoRACompatibleConv to use the original Conv2d forward function. +# This is needed to make the seamless patch work. +# NOTE: with this patch, torch.compile crashes on torch 2.0 (already fixed in nightly) +# https://github.com/huggingface/diffusers/pull/4315 +# https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/lora.py#L96C18-L96C18 +def new_LoRACompatibleConv_forward(self, x): + if self.lora_layer is None: + return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x) + else: + return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x) + self.lora_layer(x) + + +diffusers.models.lora.LoRACompatibleConv.forward = new_LoRACompatibleConv_forward diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index c2cc4645b8..a70ed03fda 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -14,6 +14,7 @@ import i18n from 'i18n'; import { size } from 'lodash-es'; import { ReactNode, memo, useCallback, useEffect } from 'react'; import { ErrorBoundary } from 'react-error-boundary'; +import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage'; import AppErrorBoundaryFallback from './AppErrorBoundaryFallback'; import GlobalHotkeys from './GlobalHotkeys'; import Toaster from './Toaster'; @@ -23,13 +24,22 @@ const DEFAULT_CONFIG = {}; interface Props { config?: PartialAppConfig; headerComponent?: ReactNode; + selectedImage?: { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; + }; } -const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => { +const App = ({ + config = DEFAULT_CONFIG, + headerComponent, + selectedImage, +}: Props) => { const language = useAppSelector(languageSelector); const logger = useLogger('system'); const dispatch = useAppDispatch(); + const { handlePreselectedImage } = usePreselectedImage(); const handleReset = useCallback(() => { localStorage.clear(); location.reload(); @@ -51,6 +61,10 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => { dispatch(appStarted()); }, [dispatch]); + useEffect(() => { + handlePreselectedImage(selectedImage); + }, [handlePreselectedImage, selectedImage]); + return ( { useEffect(() => { // configure API 
client token @@ -81,7 +86,11 @@ const InvokeAIUI = ({ }> - + diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx index 40af91d53a..35d6cc3361 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx @@ -8,7 +8,7 @@ import { ImageDraggableData, TypesafeDraggableData, } from 'features/dnd/types'; -import { useMultiselect } from 'features/gallery/hooks/useMultiselect.ts'; +import { useMultiselect } from 'features/gallery/hooks/useMultiselect'; import { MouseEvent, memo, useCallback, useMemo, useState } from 'react'; import { FaTrash } from 'react-icons/fa'; import { MdStar, MdStarBorder } from 'react-icons/md'; diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts.ts b/invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts similarity index 100% rename from invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts.ts rename to invokeai/frontend/web/src/features/gallery/hooks/useMultiselect.ts diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx index 6a8a2a3552..e081ccf471 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx @@ -1,13 +1,15 @@ -import { Flex, Image, Text } from '@chakra-ui/react'; +import { useState, PropsWithChildren, memo } from 'react'; +import { useSelector } from 'react-redux'; import { createSelector } from '@reduxjs/toolkit'; -import { stateSelector } from 'app/store/store'; +import { Flex, Image, Text } from '@chakra-ui/react'; +import { motion } from 'framer-motion'; +import { NodeProps } from 'reactflow'; +import NodeWrapper from '../common/NodeWrapper'; +import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons'; import IAIDndImage from 'common/components/IAIDndImage'; import { IAINoContentFallback } from 'common/components/IAIImageFallback'; import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants'; -import { PropsWithChildren, memo } from 'react'; -import { useSelector } from 'react-redux'; -import { NodeProps } from 'reactflow'; -import NodeWrapper from '../common/NodeWrapper'; +import { stateSelector } from 'app/store/store'; const selector = createSelector(stateSelector, ({ system, gallery }) => { const imageDTO = gallery.selection[gallery.selection.length - 1]; @@ -54,44 +56,90 @@ const CurrentImageNode = (props: NodeProps) => { export default memo(CurrentImageNode); -const Wrapper = (props: PropsWithChildren<{ nodeProps: NodeProps }>) => ( - - ) => { + const [isHovering, setIsHovering] = useState(false); + + const handleMouseEnter = () => { + setIsHovering(true); + }; + + const handleMouseLeave = () => { + setIsHovering(false); + }; + + return ( + - - Current Image - + + Current Image + + + + {props.children} + {isHovering && ( + + + + )} + - - {props.children} - - - -); + + ); +}; diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx index 
9b3ce100c8..bb9637cd73 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx @@ -10,6 +10,7 @@ import ColorInputField from './inputs/ColorInputField'; import ConditioningInputField from './inputs/ConditioningInputField'; import ControlInputField from './inputs/ControlInputField'; import ControlNetModelInputField from './inputs/ControlNetModelInputField'; +import DenoiseMaskInputField from './inputs/DenoiseMaskInputField'; import EnumInputField from './inputs/EnumInputField'; import ImageCollectionInputField from './inputs/ImageCollectionInputField'; import ImageInputField from './inputs/ImageInputField'; @@ -105,6 +106,19 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => { ); } + if ( + field?.type === 'DenoiseMaskField' && + fieldTemplate?.type === 'DenoiseMaskField' + ) { + return ( + <DenoiseMaskInputField + nodeId={nodeId} + field={field} + fieldTemplate={fieldTemplate} + /> + ); + } + if ( field?.type === 'ConditioningField' && fieldTemplate?.type === 'ConditioningField' diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/DenoiseMaskInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/DenoiseMaskInputField.tsx new file mode 100644 index 0000000000..79b2668887 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/DenoiseMaskInputField.tsx @@ -0,0 +1,17 @@ +import { + DenoiseMaskInputFieldTemplate, + DenoiseMaskInputFieldValue, + FieldComponentProps, +} from 'features/nodes/types/types'; +import { memo } from 'react'; + +const DenoiseMaskInputFieldComponent = ( + _props: FieldComponentProps< + DenoiseMaskInputFieldValue, + DenoiseMaskInputFieldTemplate + > +) => { + return null; +}; + +export default memo(DenoiseMaskInputFieldComponent); diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 4e61e46d8a..6809f3f6bb 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -59,6 +59,11 @@ export const FIELDS: Record<FieldType, FieldUIConfig> = { description: 'Images may be passed between nodes.', color: 'purple.500', }, + DenoiseMaskField: { + title: 'Denoise Mask', + description: 'Denoise masks may be passed between nodes.', + color: 'red.700', + }, LatentsField: { title: 'Latents', description: 'Latents may be passed between nodes.', diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts index 26aa19bd9d..0eda9030a6 100644 --- a/invokeai/frontend/web/src/features/nodes/types/types.ts +++ b/invokeai/frontend/web/src/features/nodes/types/types.ts @@ -64,6 +64,7 @@ export const zFieldType = z.enum([ 'string', 'array', 'ImageField', + 'DenoiseMaskField', 'LatentsField', 'ConditioningField', 'ControlField', @@ -120,6 +121,7 @@ export type InputFieldTemplate = | StringInputFieldTemplate | BooleanInputFieldTemplate | ImageInputFieldTemplate + | DenoiseMaskInputFieldTemplate | LatentsInputFieldTemplate | ConditioningInputFieldTemplate | UNetInputFieldTemplate @@ -205,6 +207,12 @@ export const zConditioningField = z.object({ }); export type ConditioningField = z.infer<typeof zConditioningField>; +export const zDenoiseMaskField = z.object({ + mask_name: z.string().trim().min(1), + masked_latents_name: z.string().trim().min(1).optional(), +}); +export type 
DenoiseMaskFieldValue = z.infer<typeof zDenoiseMaskField>; + export const zIntegerInputFieldValue = zInputFieldValueBase.extend({ type: z.literal('integer'), value: z.number().optional(), }); @@ -241,6 +249,14 @@ export const zLatentsInputFieldValue = zInputFieldValueBase.extend({ }); export type LatentsInputFieldValue = z.infer<typeof zLatentsInputFieldValue>; +export const zDenoiseMaskInputFieldValue = zInputFieldValueBase.extend({ + type: z.literal('DenoiseMaskField'), + value: zDenoiseMaskField.optional(), +}); +export type DenoiseMaskInputFieldValue = z.infer< + typeof zDenoiseMaskInputFieldValue +>; + export const zConditioningInputFieldValue = zInputFieldValueBase.extend({ type: z.literal('ConditioningField'), value: zConditioningField.optional(), @@ -459,6 +475,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [ zBooleanInputFieldValue, zImageInputFieldValue, zLatentsInputFieldValue, + zDenoiseMaskInputFieldValue, zConditioningInputFieldValue, zUNetInputFieldValue, zClipInputFieldValue, @@ -532,6 +549,11 @@ export type ImageCollectionInputFieldTemplate = InputFieldTemplateBase & { type: 'ImageCollection'; }; +export type DenoiseMaskInputFieldTemplate = InputFieldTemplateBase & { + default: undefined; + type: 'DenoiseMaskField'; +}; + export type LatentsInputFieldTemplate = InputFieldTemplateBase & { default: string; type: 'LatentsField'; diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts index ca7674a021..7fdc73407e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts @@ -8,6 +8,7 @@ import { ConditioningInputFieldTemplate, ControlInputFieldTemplate, ControlNetModelInputFieldTemplate, + DenoiseMaskInputFieldTemplate, EnumInputFieldTemplate, FieldType, FloatInputFieldTemplate, @@ -263,6 +264,19 @@ const buildImageCollectionInputFieldTemplate = ({ return template; }; +const buildDenoiseMaskInputFieldTemplate = ({ + schemaObject, + baseField, +}: BuildInputFieldArg): DenoiseMaskInputFieldTemplate => { + const template: DenoiseMaskInputFieldTemplate = { + ...baseField, + type: 'DenoiseMaskField', + default: schemaObject.default ?? 
undefined, + }; + + return template; +}; + const buildLatentsInputFieldTemplate = ({ schemaObject, baseField, @@ -498,6 +512,12 @@ export const buildInputFieldTemplate = ( baseField, }); } + if (fieldType === 'DenoiseMaskField') { + return buildDenoiseMaskInputFieldTemplate({ + schemaObject: fieldSchema, + baseField, + }); + } if (fieldType === 'LatentsField') { return buildLatentsInputFieldTemplate({ schemaObject: fieldSchema, diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts index 07f8074eb9..1d06d644d1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts +++ b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts @@ -49,6 +49,10 @@ export const buildInputFieldValue = ( fieldValue.value = []; } + if (template.type === 'DenoiseMaskField') { + fieldValue.value = undefined; + } + if (template.type === 'LatentsField') { fieldValue.value = undefined; } diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addDynamicPromptsToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addDynamicPromptsToGraph.ts index 9c71de5516..acb091a06b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addDynamicPromptsToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addDynamicPromptsToGraph.ts @@ -63,7 +63,7 @@ export const addDynamicPromptsToGraph = ( { source: { node_id: DYNAMIC_PROMPT, - field: 'prompt_collection', + field: 'collection', }, destination: { node_id: ITERATE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts index a52264ca8e..61562534a5 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLLoRAstoGraph.ts @@ -11,9 +11,11 @@ import { METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, POSITIVE_CONDITIONING, + REFINER_SEAMLESS, SDXL_CANVAS_INPAINT_GRAPH, SDXL_CANVAS_OUTPAINT_GRAPH, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; export const addSDXLLoRAsToGraph = ( @@ -36,20 +38,25 @@ export const addSDXLLoRAsToGraph = ( | MetadataAccumulatorInvocation | undefined; + // Handle Seamless Plugs + const unetLoaderId = modelLoaderNodeId; + let clipLoaderId = modelLoaderNodeId; + if ([SEAMLESS, REFINER_SEAMLESS].includes(modelLoaderNodeId)) { + clipLoaderId = SDXL_MODEL_LOADER; + } + if (loraCount > 0) { // Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs graph.edges = graph.edges.filter( (e) => !( - e.source.node_id === modelLoaderNodeId && - ['unet'].includes(e.source.field) + e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field) ) && !( - e.source.node_id === modelLoaderNodeId && - ['clip'].includes(e.source.field) + e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field) ) && !( - e.source.node_id === modelLoaderNodeId && + e.source.node_id === clipLoaderId && ['clip2'].includes(e.source.field) ) ); @@ -88,7 +95,7 @@ export const addSDXLLoRAsToGraph = ( // first lora = start the lora chain, attach directly to model loader graph.edges.push({ source: { - node_id: modelLoaderNodeId, + node_id: unetLoaderId, field: 'unet', }, destination: { @@ -99,7 +106,7 @@ export const addSDXLLoRAsToGraph = ( graph.edges.push({ source: { - node_id: modelLoaderNodeId, + node_id: clipLoaderId, 
field: 'clip', }, destination: { @@ -110,7 +117,7 @@ export const addSDXLLoRAsToGraph = ( graph.edges.push({ source: { - node_id: modelLoaderNodeId, + node_id: clipLoaderId, field: 'clip2', }, destination: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 3edea0b3c0..9474a8df4e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -1,11 +1,15 @@ import { RootState } from 'app/store/store'; -import { MetadataAccumulatorInvocation } from 'services/api/types'; +import { + MetadataAccumulatorInvocation, + SeamlessModeInvocation, +} from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { CANVAS_OUTPUT, LATENTS_TO_IMAGE, MASK_BLUR, METADATA_ACCUMULATOR, + REFINER_SEAMLESS, SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, SDXL_CANVAS_INPAINT_GRAPH, SDXL_CANVAS_OUTPAINT_GRAPH, @@ -21,7 +25,8 @@ import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; export const addSDXLRefinerToGraph = ( state: RootState, graph: NonNullableGraph, - baseNodeId: string + baseNodeId: string, + modelLoaderNodeId?: string ): void => { const { refinerModel, @@ -33,6 +38,8 @@ export const addSDXLRefinerToGraph = ( refinerStart, } = state.sdxl; + const { seamlessXAxis, seamlessYAxis } = state.generation; + if (!refinerModel) { return; } @@ -53,6 +60,10 @@ export const addSDXLRefinerToGraph = ( metadataAccumulator.refiner_steps = refinerSteps; } + const modelLoaderId = modelLoaderNodeId + ? modelLoaderNodeId + : SDXL_MODEL_LOADER; + // Construct Style Prompt const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = craftSDXLStylePrompt(state, true); @@ -65,10 +76,7 @@ export const addSDXLRefinerToGraph = ( graph.edges = graph.edges.filter( (e) => - !( - e.source.node_id === SDXL_MODEL_LOADER && - ['vae'].includes(e.source.field) - ) + !(e.source.node_id === modelLoaderId && ['vae'].includes(e.source.field)) ); graph.nodes[SDXL_REFINER_MODEL_LOADER] = { @@ -98,8 +106,39 @@ export const addSDXLRefinerToGraph = ( denoising_end: 1, }; - graph.edges.push( - { + // Add Seamless To Refiner + if (seamlessXAxis || seamlessYAxis) { + graph.nodes[REFINER_SEAMLESS] = { + id: REFINER_SEAMLESS, + type: 'seamless', + seamless_x: seamlessXAxis, + seamless_y: seamlessYAxis, + } as SeamlessModeInvocation; + + graph.edges.push( + { + source: { + node_id: SDXL_REFINER_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: REFINER_SEAMLESS, + field: 'unet', + }, + }, + { + source: { + node_id: REFINER_SEAMLESS, + field: 'unet', + }, + destination: { + node_id: SDXL_REFINER_DENOISE_LATENTS, + field: 'unet', + }, + } + ); + } else { + graph.edges.push({ source: { node_id: SDXL_REFINER_MODEL_LOADER, field: 'unet', @@ -108,7 +147,10 @@ export const addSDXLRefinerToGraph = ( node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'unet', }, - }, + }); + } + + graph.edges.push( { source: { node_id: SDXL_REFINER_MODEL_LOADER, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts new file mode 100644 index 0000000000..bdbaacd384 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSeamlessToLinearGraph.ts @@ -0,0 +1,109 @@ +import { RootState } from 'app/store/store'; 
+import { SeamlessModeInvocation } from 'services/api/types'; +import { NonNullableGraph } from '../../types/types'; +import { + CANVAS_COHERENCE_DENOISE_LATENTS, + CANVAS_INPAINT_GRAPH, + CANVAS_OUTPAINT_GRAPH, + DENOISE_LATENTS, + SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, + SDXL_CANVAS_INPAINT_GRAPH, + SDXL_CANVAS_OUTPAINT_GRAPH, + SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, + SDXL_DENOISE_LATENTS, + SDXL_IMAGE_TO_IMAGE_GRAPH, + SDXL_TEXT_TO_IMAGE_GRAPH, + SEAMLESS, +} from './constants'; + +export const addSeamlessToLinearGraph = ( + state: RootState, + graph: NonNullableGraph, + modelLoaderNodeId: string +): void => { + const { seamlessXAxis, seamlessYAxis } = state.generation; + + graph.nodes[SEAMLESS] = { + id: SEAMLESS, + type: 'seamless', + seamless_x: seamlessXAxis, + seamless_y: seamlessYAxis, + } as SeamlessModeInvocation; + + let denoisingNodeId = DENOISE_LATENTS; + + if ( + graph.id === SDXL_TEXT_TO_IMAGE_GRAPH || + graph.id === SDXL_IMAGE_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_INPAINT_GRAPH || + graph.id === SDXL_CANVAS_OUTPAINT_GRAPH + ) { + denoisingNodeId = SDXL_DENOISE_LATENTS; + } + + // Remove the existing UNet and VAE connections from the model loader + graph.edges = graph.edges.filter( + (e) => + !( + e.source.node_id === modelLoaderNodeId && + ['unet'].includes(e.source.field) + ) && + !( + e.source.node_id === modelLoaderNodeId && + ['vae'].includes(e.source.field) + ) + ); + + graph.edges.push( + { + source: { + node_id: modelLoaderNodeId, + field: 'unet', + }, + destination: { + node_id: SEAMLESS, + field: 'unet', + }, + }, + { + source: { + node_id: modelLoaderNodeId, + field: 'vae', + }, + destination: { + node_id: SEAMLESS, + field: 'vae', + }, + }, + { + source: { + node_id: SEAMLESS, + field: 'unet', + }, + destination: { + node_id: denoisingNodeId, + field: 'unet', + }, + } + ); + + if ( + graph.id === CANVAS_INPAINT_GRAPH || + graph.id === CANVAS_OUTPAINT_GRAPH || + graph.id === SDXL_CANVAS_INPAINT_GRAPH || + graph.id === SDXL_CANVAS_OUTPAINT_GRAPH + ) { + graph.edges.push({ + source: { + node_id: SEAMLESS, + field: 'unet', + }, + destination: { + node_id: CANVAS_COHERENCE_DENOISE_LATENTS, + field: 'unet', + }, + }); + } +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 360e07062a..8fbd94e491 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -9,6 +9,7 @@ import { CANVAS_TEXT_TO_IMAGE_GRAPH, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, @@ -30,6 +31,11 @@ export const addVAEToGraph = ( modelLoaderNodeId: string = MAIN_MODEL_LOADER ): void => { const { vae } = state.generation; + const { boundingBoxScaleMethod } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); const isAutoVae = !vae; const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as @@ -76,7 +82,7 @@ export const addVAEToGraph = ( field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT, field: 'vae', }, }); @@ -117,6 +123,16 @@ export const addVAEToGraph = ( field: 'vae', }, }, + { + source: { + node_id: isAutoVae ? 
modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'vae', + }, + }, { source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index a68aeef392..63a7eac56b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -2,15 +2,12 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; -import { - ImageDTO, - ImageResizeInvocation, - ImageToLatentsInvocation, -} from 'services/api/types'; +import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -19,12 +16,14 @@ import { CLIP_SKIP, DENOISE_LATENTS, IMAGE_TO_LATENTS, + IMG2IMG_RESIZE, + LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, - RESIZE, + SEAMLESS, } from './constants'; /** @@ -43,21 +42,34 @@ export const buildCanvasImageToImageGraph = ( scheduler, steps, img2imgStrength: strength, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, + seamlessXAxis, + seamlessYAxis, } = state.generation; // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); throw new Error('No model found in state'); } + let modelLoaderNodeId = MAIN_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; @@ -75,9 +87,9 @@ export const buildCanvasImageToImageGraph = ( const graph: NonNullableGraph = { id: CANVAS_IMAGE_TO_IMAGE_GRAPH, nodes: { - [MAIN_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'main_model_loader', - id: MAIN_MODEL_LOADER, + id: modelLoaderNodeId, is_intermediate: true, model, }, @@ -104,15 +116,17 @@ export const buildCanvasImageToImageGraph = ( id: NOISE, is_intermediate: true, use_cpu, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
height + : scaledBoundingBoxDimensions.height, }, [IMAGE_TO_LATENTS]: { type: 'i2l', id: IMAGE_TO_LATENTS, is_intermediate: true, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, }, [DENOISE_LATENTS]: { type: 'denoise_latents', @@ -134,7 +148,7 @@ export const buildCanvasImageToImageGraph = ( // Connect Model Loader to CLIP Skip and UNet { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -144,7 +158,7 @@ export const buildCanvasImageToImageGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -214,82 +228,84 @@ export const buildCanvasImageToImageGraph = ( field: 'latents', }, }, - // Decode the denoised latents to an image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[IMG2IMG_RESIZE] = { + id: IMG2IMG_RESIZE, + type: 'img_resize', + is_intermediate: true, + image: initialImage, + width: scaledBoundingBoxDimensions.width, + height: scaledBoundingBoxDimensions.height, + }; + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( + { + source: { + node_id: IMG2IMG_RESIZE, + field: 'image', + }, + destination: { + node_id: IMAGE_TO_LATENTS, + field: 'image', + }, + }, { source: { node_id: DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; - - // handle `fit` - if (initialImage.width !== width || initialImage.height !== height) { - // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS` - - // Create a resize node, explicitly setting its image - const resizeNode: ImageResizeInvocation = { - id: RESIZE, - type: 'img_resize', - image: { - image_name: initialImage.image_name, - }, - is_intermediate: true, - width, - height, - }; - - graph.nodes[RESIZE] = resizeNode; - - // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS` - graph.edges.push({ - source: { node_id: RESIZE, field: 'image' }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'image', - }, - }); - - // The `RESIZE` node also passes its width and height to `NOISE` - graph.edges.push({ - source: { node_id: RESIZE, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', - }, - }); - - graph.edges.push({ - source: { node_id: RESIZE, field: 'height' }, - destination: { - node_id: NOISE, - field: 'height', - }, - }); + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); } else { - // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly - (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = { - image_name: initialImage.image_name, + graph.nodes[CANVAS_OUTPUT] = { + type: 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? 
true : false, }; - // Pass the image's dimensions to the `NOISE` node + (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = + initialImage; + graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', + source: { + node_id: DENOISE_LATENTS, + field: 'latents', }, - }); - graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'height' }, destination: { - node_id: NOISE, - field: 'height', + node_id: CANVAS_OUTPUT, + field: 'latents', }, }); } @@ -300,8 +316,10 @@ export const buildCanvasImageToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'img2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, @@ -328,11 +346,17 @@ export const buildCanvasImageToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // add LoRA support addLoRAsToGraph(state, graph, DENOISE_LATENTS); // optionally add custom VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 4b017340cb..6eafd1fc06 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + CreateDenoiseMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -12,16 +13,18 @@ import { import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_INPAINT_GRAPH, - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_INPAINT_GRAPH, + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -36,6 +39,7 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + SEAMLESS, } from './constants'; /** @@ -66,6 +70,8 @@ export const buildCanvasInpaintGraph = ( canvasCoherenceSteps, canvasCoherenceStrength, clipSkip, + seamlessXAxis, + seamlessYAxis, } = state.generation; if (!model) { @@ -83,6 +89,8 @@ export const buildCanvasInpaintGraph = ( shouldAutoSave, } = state.canvas; + let modelLoaderNodeId = MAIN_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? 
shouldUseCpuNoise : shouldUseCpuNoise; @@ -90,9 +98,9 @@ export const buildCanvasInpaintGraph = ( const graph: NonNullableGraph = { id: CANVAS_INPAINT_GRAPH, nodes: { - [MAIN_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'main_model_loader', - id: MAIN_MODEL_LOADER, + id: modelLoaderNodeId, is_intermediate: true, model, }, @@ -127,6 +135,12 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, }, + [INPAINT_CREATE_MASK]: { + type: 'create_denoise_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, [NOISE]: { type: 'noise', id: NOISE, @@ -196,7 +210,7 @@ export const buildCanvasInpaintGraph = ( // Connect Model Loader to CLIP Skip and UNet { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -206,7 +220,7 @@ export const buildCanvasInpaintGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -276,16 +290,27 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, destination: { - node_id: DENOISE_LATENTS, + node_id: INPAINT_CREATE_MASK, field: 'mask', }, }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'denoise_mask', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'denoise_mask', + }, + }, // Iterate { source: { @@ -330,7 +355,7 @@ export const buildCanvasInpaintGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -459,6 +484,16 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -516,6 +551,10 @@ export const buildCanvasInpaintGraph = ( ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), image: canvasMaskImage, }; + graph.nodes[INPAINT_CREATE_MASK] = { + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation), + image: canvasInitImage, + }; graph.edges.push( // Color Correct The Inpainted Result @@ -562,11 +601,17 @@ export const buildCanvasInpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 9f424d3dcf..aec9d1c035 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -14,16 +14,18 @@ import { import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from 
'./addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPAINT_GRAPH, - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPAINT_GRAPH, + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -42,6 +44,7 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + SEAMLESS, } from './constants'; /** @@ -74,6 +77,8 @@ export const buildCanvasOutpaintGraph = ( tileSize, infillMethod, clipSkip, + seamlessXAxis, + seamlessYAxis, } = state.generation; if (!model) { @@ -91,6 +96,8 @@ export const buildCanvasOutpaintGraph = ( shouldAutoSave, } = state.canvas; + let modelLoaderNodeId = MAIN_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : shouldUseCpuNoise; @@ -98,9 +105,9 @@ export const buildCanvasOutpaintGraph = ( const graph: NonNullableGraph = { id: CANVAS_OUTPAINT_GRAPH, nodes: { - [MAIN_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'main_model_loader', - id: MAIN_MODEL_LOADER, + id: modelLoaderNodeId, is_intermediate: true, model, }, @@ -153,6 +160,12 @@ export const buildCanvasOutpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_denoise_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, [DENOISE_LATENTS]: { type: 'denoise_latents', id: DENOISE_LATENTS, @@ -215,7 +228,7 @@ export const buildCanvasOutpaintGraph = ( // Connect Model Loader To UNet & Clip Skip { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -225,7 +238,7 @@ export const buildCanvasOutpaintGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -317,16 +330,27 @@ export const buildCanvasOutpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, destination: { - node_id: DENOISE_LATENTS, + node_id: INPAINT_CREATE_MASK, field: 'mask', }, }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'denoise_mask', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'denoise_mask', + }, + }, // Iterate { source: { @@ -371,7 +395,7 @@ export const buildCanvasOutpaintGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -522,6 +546,16 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Take combined mask and resize and then blur { source: { @@ -640,6 +674,16 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -694,11 +738,17 @@ export const buildCanvasOutpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + 
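Note: the key rerouting in these hunks is that the blurred mask no longer feeds DENOISE_LATENTS directly; it goes through INPAINT_CREATE_MASK, which VAE-encodes the masked image and hands the denoiser a single denoise_mask input. A condensed restatement as a hypothetical wireDenoiseMask helper (the builders push these edges inline, with identifiers as in the hunks above):

import { NonNullableGraph } from 'features/nodes/types/types';
import {
  DENOISE_LATENTS,
  INPAINT_CREATE_MASK,
  INPAINT_INFILL,
  MASK_BLUR,
} from './constants';

// Hypothetical condensation of the rewiring above.
export const wireDenoiseMask = (graph: NonNullableGraph) => {
  graph.edges.push(
    {
      source: { node_id: MASK_BLUR, field: 'image' },
      destination: { node_id: INPAINT_CREATE_MASK, field: 'mask' },
    },
    {
      // The outpaint builder feeds the infilled image; the inpaint builder
      // uses the resized or raw init image instead.
      source: { node_id: INPAINT_INFILL, field: 'image' },
      destination: { node_id: INPAINT_CREATE_MASK, field: 'image' },
    },
    {
      source: { node_id: INPAINT_CREATE_MASK, field: 'denoise_mask' },
      destination: { node_id: DENOISE_LATENTS, field: 'denoise_mask' },
    }
  );
};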
addVAEToGraph(state, graph, modelLoaderNodeId); // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index ef32943bc8..e5a14c3f8a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -2,29 +2,29 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; -import { - ImageDTO, - ImageResizeInvocation, - ImageToLatentsInvocation, -} from 'services/api/types'; +import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, IMAGE_TO_LATENTS, + IMG2IMG_RESIZE, + LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, - RESIZE, + REFINER_SEAMLESS, SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -47,6 +47,8 @@ export const buildCanvasSDXLImageToImageGraph = ( clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, + seamlessXAxis, + seamlessYAxis, } = state.generation; const { @@ -59,13 +61,24 @@ export const buildCanvasSDXLImageToImageGraph = ( // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); throw new Error('No model found in state'); } + // Model Loader ID + let modelLoaderNodeId = SDXL_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; @@ -87,9 +100,9 @@ export const buildCanvasSDXLImageToImageGraph = ( const graph: NonNullableGraph = { id: SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, nodes: { - [SDXL_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, + id: modelLoaderNodeId, model, }, [POSITIVE_CONDITIONING]: { @@ -109,16 +122,18 @@ export const buildCanvasSDXLImageToImageGraph = ( id: NOISE, is_intermediate: true, use_cpu, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
height + : scaledBoundingBoxDimensions.height, }, [IMAGE_TO_LATENTS]: { type: 'i2l', id: IMAGE_TO_LATENTS, is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', @@ -132,18 +147,12 @@ export const buildCanvasSDXLImageToImageGraph = ( : 1 - strength, denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, }, - [CANVAS_OUTPUT]: { - type: 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - fp32: vaePrecision === 'fp32' ? true : false, - }, }, edges: [ // Connect Model Loader To UNet & CLIP { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -153,7 +162,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -163,7 +172,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -173,7 +182,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -183,7 +192,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -232,82 +241,84 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, }, - // Decode denoised latents to an image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[IMG2IMG_RESIZE] = { + id: IMG2IMG_RESIZE, + type: 'img_resize', + is_intermediate: true, + image: initialImage, + width: scaledBoundingBoxDimensions.width, + height: scaledBoundingBoxDimensions.height, + }; + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }; + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( + { + source: { + node_id: IMG2IMG_RESIZE, + field: 'image', + }, + destination: { + node_id: IMAGE_TO_LATENTS, + field: 'image', + }, + }, { source: { node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; - - // handle `fit` - if (initialImage.width !== width || initialImage.height !== height) { - // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS` - - // Create a resize node, explicitly setting its image - const resizeNode: ImageResizeInvocation = { - id: RESIZE, - type: 'img_resize', - image: { - image_name: initialImage.image_name, - }, - is_intermediate: true, - width, - height, - }; - - graph.nodes[RESIZE] = resizeNode; - - // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS` - graph.edges.push({ - source: { node_id: RESIZE, field: 'image' }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'image', - }, - }); - - // The `RESIZE` node also passes its width and height to `NOISE` - graph.edges.push({ - source: { node_id: RESIZE, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', - }, - }); - - graph.edges.push({ - source: { node_id: RESIZE, field: 'height' }, - destination: { - node_id: NOISE, - field: 'height', - }, - }); + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); } else { - // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly - (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = { - image_name: initialImage.image_name, + graph.nodes[CANVAS_OUTPUT] = { + type: 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, }; - // Pass the image's dimensions to the `NOISE` node + (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = + initialImage; + graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'width' }, - destination: { - node_id: NOISE, - field: 'width', + source: { + node_id: SDXL_DENOISE_LATENTS, + field: 'latents', }, - }); - graph.edges.push({ - source: { node_id: IMAGE_TO_LATENTS, field: 'height' }, destination: { - node_id: NOISE, - field: 'height', + node_id: CANVAS_OUTPUT, + field: 'latents', }, }); } @@ -318,8 +329,10 @@ export const buildCanvasSDXLImageToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'img2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
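Note: when scaled dimensions are active, CANVAS_OUTPUT changes type from 'l2i' to 'img_resize' and an intermediate LATENTS_TO_IMAGE node does the decoding, so the graph generates at the scaled size and resizes back to the bounding box; either way, CANVAS_OUTPUT remains the node whose is_intermediate flag tracks shouldAutoSave. A hypothetical wireCanvasOutput sketch of the two topologies:

import { NonNullableGraph } from 'features/nodes/types/types';
import {
  CANVAS_OUTPUT,
  LATENTS_TO_IMAGE,
  SDXL_DENOISE_LATENTS,
} from './constants';

// Hypothetical condensation of the branch above. Scaled:
//   denoise -> l2i (LATENTS_TO_IMAGE, intermediate) -> img_resize (CANVAS_OUTPUT)
// Unscaled:
//   denoise -> l2i (CANVAS_OUTPUT directly)
export const wireCanvasOutput = (
  graph: NonNullableGraph,
  isUsingScaledDimensions: boolean
) => {
  const decodeTarget = isUsingScaledDimensions
    ? LATENTS_TO_IMAGE
    : CANVAS_OUTPUT;
  graph.edges.push({
    source: { node_id: SDXL_DENOISE_LATENTS, field: 'latents' },
    destination: { node_id: decodeTarget, field: 'latents' },
  });
  if (isUsingScaledDimensions) {
    // Resize the decoded image back down to the bounding box size.
    graph.edges.push({
      source: { node_id: LATENTS_TO_IMAGE, field: 'image' },
      destination: { node_id: CANVAS_OUTPUT, field: 'image' },
    });
  }
};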
height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, @@ -346,16 +359,23 @@ export const buildCanvasSDXLImageToImageGraph = ( }, }); - // add LoRA support - addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } // Add Refiner if enabled if (shouldUseSDXLRefiner) { addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + modelLoaderNodeId = REFINER_SEAMLESS; } // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); + + // add LoRA support + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index f60c710c64..4b2ffdb7ca 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + CreateDenoiseMaskInvocation, ImageBlurInvocation, ImageDTO, ImageToLatentsInvocation, @@ -13,13 +14,15 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPUT, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -33,9 +36,11 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + REFINER_SEAMLESS, SDXL_CANVAS_INPAINT_GRAPH, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -65,6 +70,8 @@ export const buildCanvasSDXLInpaintGraph = ( maskBlurMethod, canvasCoherenceSteps, canvasCoherenceStrength, + seamlessXAxis, + seamlessYAxis, } = state.generation; const { @@ -89,6 +96,8 @@ export const buildCanvasSDXLInpaintGraph = ( shouldAutoSave, } = state.canvas; + let modelLoaderNodeId = SDXL_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? 
shouldUseCpuNoise : shouldUseCpuNoise; @@ -100,9 +109,9 @@ export const buildCanvasSDXLInpaintGraph = ( const graph: NonNullableGraph = { id: SDXL_CANVAS_INPAINT_GRAPH, nodes: { - [SDXL_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, + id: modelLoaderNodeId, model, }, [POSITIVE_CONDITIONING]: { @@ -136,6 +145,12 @@ export const buildCanvasSDXLInpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_denoise_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', id: SDXL_DENOISE_LATENTS, @@ -201,7 +216,7 @@ export const buildCanvasSDXLInpaintGraph = ( // Connect Model Loader to UNet and CLIP { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -211,7 +226,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -221,7 +236,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -231,7 +246,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -241,7 +256,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -290,16 +305,27 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, destination: { - node_id: SDXL_DENOISE_LATENTS, + node_id: INPAINT_CREATE_MASK, field: 'mask', }, }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'denoise_mask', + }, + destination: { + node_id: SDXL_DENOISE_LATENTS, + field: 'denoise_mask', + }, + }, // Iterate { source: { @@ -344,7 +370,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -473,6 +499,16 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -530,6 +566,10 @@ export const buildCanvasSDXLInpaintGraph = ( ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), image: canvasMaskImage, }; + graph.nodes[INPAINT_CREATE_MASK] = { + ...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation), + image: canvasInitImage, + }; graph.edges.push( // Color Correct The Inpainted Result @@ -576,16 +616,28 @@ export const buildCanvasSDXLInpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, CANVAS_COHERENCE_DENOISE_LATENTS); + addSDXLRefinerToGraph( + state, + graph, + CANVAS_COHERENCE_DENOISE_LATENTS, + modelLoaderNodeId + ); + modelLoaderNodeId = REFINER_SEAMLESS; } // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); 
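Note: ordering is load-bearing here. Seamless wraps the model loader first, the refiner may wrap it again, and only then do VAE and LoRA attach, so they wire to whichever node last re-exported unet/vae. The shared pattern these SDXL canvas builders now follow, as a sketch with identifiers as in the hunks above:

// Sketch only; each builder inlines this sequence.
let modelLoaderNodeId = SDXL_MODEL_LOADER;

if (seamlessXAxis || seamlessYAxis) {
  addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
  modelLoaderNodeId = SEAMLESS; // the seamless node now re-exports unet/vae
}

if (shouldUseSDXLRefiner) {
  addSDXLRefinerToGraph(
    state,
    graph,
    CANVAS_COHERENCE_DENOISE_LATENTS,
    modelLoaderNodeId
  );
  modelLoaderNodeId = REFINER_SEAMLESS;
}

// VAE and LoRA attach last, to whichever node id survived the rewiring.
addVAEToGraph(state, graph, modelLoaderNodeId);
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);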
// add LoRA support - addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 7e98c37233..0c075beb4a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -15,13 +15,15 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPUT, CANVAS_COHERENCE_DENOISE_LATENTS, CANVAS_COHERENCE_NOISE, CANVAS_COHERENCE_NOISE_INCREMENT, + CANVAS_OUTPUT, + INPAINT_CREATE_MASK, INPAINT_IMAGE, INPAINT_IMAGE_RESIZE_DOWN, INPAINT_IMAGE_RESIZE_UP, @@ -39,9 +41,11 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + REFINER_SEAMLESS, SDXL_CANVAS_OUTPAINT_GRAPH, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -73,6 +77,8 @@ export const buildCanvasSDXLOutpaintGraph = ( canvasCoherenceStrength, tileSize, infillMethod, + seamlessXAxis, + seamlessYAxis, } = state.generation; const { @@ -97,6 +103,8 @@ export const buildCanvasSDXLOutpaintGraph = ( shouldAutoSave, } = state.canvas; + let modelLoaderNodeId = SDXL_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : shouldUseCpuNoise; @@ -156,6 +164,12 @@ export const buildCanvasSDXLOutpaintGraph = ( use_cpu, is_intermediate: true, }, + [INPAINT_CREATE_MASK]: { + type: 'create_denoise_mask', + id: INPAINT_CREATE_MASK, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', id: SDXL_DENOISE_LATENTS, @@ -331,16 +345,27 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'latents', }, }, + // Create Inpaint Mask { source: { node_id: MASK_BLUR, field: 'image', }, destination: { - node_id: SDXL_DENOISE_LATENTS, + node_id: INPAINT_CREATE_MASK, field: 'mask', }, }, + { + source: { + node_id: INPAINT_CREATE_MASK, + field: 'denoise_mask', + }, + destination: { + node_id: SDXL_DENOISE_LATENTS, + field: 'denoise_mask', + }, + }, // Iterate { source: { @@ -537,6 +562,16 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Take combined mask and resize and then blur { source: { @@ -655,6 +690,16 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_CREATE_MASK, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -709,16 +754,28 @@ export const buildCanvasSDXLOutpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, CANVAS_COHERENCE_DENOISE_LATENTS); + addSDXLRefinerToGraph( + state, + graph, + CANVAS_COHERENCE_DENOISE_LATENTS, + modelLoaderNodeId + ); + modelLoaderNodeId = REFINER_SEAMLESS; } // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); // add LoRA support - addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index e79e08ba41..afefa53645 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -11,18 +11,22 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, + LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, + REFINER_SEAMLESS, SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -44,12 +48,22 @@ export const buildCanvasSDXLTextToImageGraph = ( clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, + seamlessXAxis, + seamlessYAxis, } = state.generation; // The bounding box 
determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = state.sdxl; @@ -65,7 +79,7 @@ export const buildCanvasSDXLTextToImageGraph = ( const isUsingOnnxModel = model.model_type === 'onnx'; - const modelLoaderNodeId = isUsingOnnxModel + let modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : SDXL_MODEL_LOADER; @@ -136,17 +150,15 @@ export const buildCanvasSDXLTextToImageGraph = ( type: 'noise', id: NOISE, is_intermediate: true, - width, - height, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, use_cpu, }, [t2lNode.id]: t2lNode, - [CANVAS_OUTPUT]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - fp32: vaePrecision === 'fp32' ? true : false, - }, }, edges: [ // Connect Model Loader to UNet and CLIP @@ -231,19 +243,67 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'noise', }, }, - // Decode Denoised Latents To Image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( { source: { node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); + } else { + graph.nodes[CANVAS_OUTPUT] = { + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.edges.push({ + source: { + node_id: SDXL_DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'latents', + }, + }); + } // add metadata accumulator, which is only mostly populated - some fields are added later graph.nodes[METADATA_ACCUMULATOR] = { @@ -251,8 +311,10 @@ export const buildCanvasSDXLTextToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'txt2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? 
height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, @@ -277,9 +339,16 @@ export const buildCanvasSDXLTextToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add Refiner if enabled if (shouldUseSDXLRefiner) { addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + modelLoaderNodeId = REFINER_SEAMLESS; } // add LoRA support diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 4548a7e099..b25c266d66 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -17,12 +18,14 @@ import { CANVAS_TEXT_TO_IMAGE_GRAPH, CLIP_SKIP, DENOISE_LATENTS, + LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, + SEAMLESS, } from './constants'; /** @@ -39,15 +42,26 @@ export const buildCanvasTextToImageGraph = ( cfgScale: cfg_scale, scheduler, steps, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, + seamlessXAxis, + seamlessYAxis, } = state.generation; // The bounding box determines width and height, not the width and height params const { width, height } = state.canvas.boundingBoxDimensions; - const { shouldAutoSave } = state.canvas; + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const isUsingScaledDimensions = ['auto', 'manual'].includes( + boundingBoxScaleMethod + ); if (!model) { log.error('No model found in state'); @@ -60,7 +74,7 @@ export const buildCanvasTextToImageGraph = ( const isUsingOnnxModel = model.model_type === 'onnx'; - const modelLoaderNodeId = isUsingOnnxModel + let modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER; @@ -131,16 +145,15 @@ export const buildCanvasTextToImageGraph = ( type: 'noise', id: NOISE, is_intermediate: true, - width, - height, + width: !isUsingScaledDimensions + ? width + : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, use_cpu, }, [t2lNode.id]: t2lNode, - [CANVAS_OUTPUT]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: CANVAS_OUTPUT, - is_intermediate: !shouldAutoSave, - }, }, edges: [ // Connect Model Loader to UNet & CLIP Skip @@ -216,19 +229,67 @@ export const buildCanvasTextToImageGraph = ( field: 'noise', }, }, - // Decode denoised latents to image + ], + }; + + // Decode Latents To Image & Handle Scaled Before Processing + if (isUsingScaledDimensions) { + graph.nodes[LATENTS_TO_IMAGE] = { + id: LATENTS_TO_IMAGE, + type: isUsingOnnxModel ? 
'l2i_onnx' : 'l2i', + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.nodes[CANVAS_OUTPUT] = { + id: CANVAS_OUTPUT, + type: 'img_resize', + is_intermediate: !shouldAutoSave, + width: width, + height: height, + }; + + graph.edges.push( { source: { node_id: DENOISE_LATENTS, field: 'latents', }, destination: { - node_id: CANVAS_OUTPUT, + node_id: LATENTS_TO_IMAGE, field: 'latents', }, }, - ], - }; + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + } + ); + } else { + graph.nodes[CANVAS_OUTPUT] = { + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, + }; + + graph.edges.push({ + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'latents', + }, + }); + } // add metadata accumulator, which is only mostly populated - some fields are added later graph.nodes[METADATA_ACCUMULATOR] = { @@ -236,8 +297,10 @@ export const buildCanvasTextToImageGraph = ( type: 'metadata_accumulator', generation_mode: 'txt2img', cfg_scale, - height, - width, + width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width, + height: !isUsingScaledDimensions + ? height + : scaledBoundingBoxDimensions.height, positive_prompt: '', // set in addDynamicPromptsToGraph negative_prompt: negativePrompt, model, @@ -262,6 +325,12 @@ export const buildCanvasTextToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts index 982a09357f..7c1764ae65 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts @@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -24,6 +25,7 @@ import { NOISE, POSITIVE_CONDITIONING, RESIZE, + SEAMLESS, } from './constants'; /** @@ -49,6 +51,8 @@ export const buildLinearImageToImageGraph = ( shouldUseCpuNoise, shouldUseNoiseSettings, vaePrecision, + seamlessXAxis, + seamlessYAxis, } = state.generation; // TODO: add batch functionality @@ -80,6 +84,8 @@ export const buildLinearImageToImageGraph = ( throw new Error('No model found in state'); } + let modelLoaderNodeId = MAIN_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? 
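Note: the canvas text-to-image builders above stay ONNX-agnostic by deriving both the loader node id and the decode node type from model.model_type; modelLoaderNodeId also changes from const to let so the seamless pass can reassign it before VAE/LoRA are attached. Restated as a hypothetical pickLoaderAndDecoder helper:

import { MAIN_MODEL_LOADER, ONNX_MODEL_LOADER } from './constants';

// Hypothetical helper; not part of this patch.
export const pickLoaderAndDecoder = (model: { model_type: string }) => {
  const isUsingOnnxModel = model.model_type === 'onnx';
  return {
    modelLoaderNodeId: isUsingOnnxModel
      ? ONNX_MODEL_LOADER
      : MAIN_MODEL_LOADER,
    decodeType: isUsingOnnxModel ? ('l2i_onnx' as const) : ('l2i' as const),
  };
};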
shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; @@ -88,9 +94,9 @@ export const buildLinearImageToImageGraph = ( const graph: NonNullableGraph = { id: IMAGE_TO_IMAGE_GRAPH, nodes: { - [MAIN_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'main_model_loader', - id: MAIN_MODEL_LOADER, + id: modelLoaderNodeId, model, }, [CLIP_SKIP]: { @@ -141,7 +147,7 @@ export const buildLinearImageToImageGraph = ( // Connect Model Loader to UNet and CLIP Skip { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -151,7 +157,7 @@ export const buildLinearImageToImageGraph = ( }, { source: { - node_id: MAIN_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -338,11 +344,17 @@ export const buildLinearImageToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // optionally add custom VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index 42ea07c923..17f1052384 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -11,6 +11,7 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -20,10 +21,12 @@ import { NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, + REFINER_SEAMLESS, RESIZE, SDXL_DENOISE_LATENTS, SDXL_IMAGE_TO_IMAGE_GRAPH, SDXL_MODEL_LOADER, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -49,6 +52,8 @@ export const buildLinearSDXLImageToImageGraph = ( shouldUseCpuNoise, shouldUseNoiseSettings, vaePrecision, + seamlessXAxis, + seamlessYAxis, } = state.generation; const { @@ -79,6 +84,9 @@ export const buildLinearSDXLImageToImageGraph = ( throw new Error('No model found in state'); } + // Model Loader ID + let modelLoaderNodeId = SDXL_MODEL_LOADER; + const use_cpu = shouldUseNoiseSettings ? 
shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; @@ -91,9 +99,9 @@ export const buildLinearSDXLImageToImageGraph = ( const graph: NonNullableGraph = { id: SDXL_IMAGE_TO_IMAGE_GRAPH, nodes: { - [SDXL_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, + id: modelLoaderNodeId, model, }, [POSITIVE_CONDITIONING]: { @@ -143,7 +151,7 @@ export const buildLinearSDXLImageToImageGraph = ( // Connect Model Loader to UNet, CLIP & VAE { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -153,7 +161,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -163,7 +171,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -173,7 +181,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -183,7 +191,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -351,15 +359,23 @@ export const buildLinearSDXLImageToImageGraph = ( }, }); - addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } // Add Refiner if enabled if (shouldUseSDXLRefiner) { addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + modelLoaderNodeId = REFINER_SEAMLESS; } // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); + + // Add LoRA Support + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index a74884f23b..2af37fd905 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -7,6 +7,7 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -15,9 +16,11 @@ import { NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, + REFINER_SEAMLESS, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, SDXL_TEXT_TO_IMAGE_GRAPH, + SEAMLESS, } from './constants'; import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; @@ -38,6 +41,8 @@ export const buildLinearSDXLTextToImageGraph = ( shouldUseCpuNoise, shouldUseNoiseSettings, vaePrecision, + seamlessXAxis, + seamlessYAxis, } = state.generation; const { @@ -61,6 +66,9 @@ export const buildLinearSDXLTextToImageGraph = ( 
const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + // Model Loader ID + let modelLoaderNodeId = SDXL_MODEL_LOADER; + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -74,9 +82,9 @@ export const buildLinearSDXLTextToImageGraph = ( const graph: NonNullableGraph = { id: SDXL_TEXT_TO_IMAGE_GRAPH, nodes: { - [SDXL_MODEL_LOADER]: { + [modelLoaderNodeId]: { type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, + id: modelLoaderNodeId, model, }, [POSITIVE_CONDITIONING]: { @@ -117,7 +125,7 @@ export const buildLinearSDXLTextToImageGraph = ( // Connect Model Loader to UNet, VAE & CLIP { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'unet', }, destination: { @@ -127,7 +135,7 @@ export const buildLinearSDXLTextToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -137,7 +145,7 @@ export const buildLinearSDXLTextToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -147,7 +155,7 @@ export const buildLinearSDXLTextToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip', }, destination: { @@ -157,7 +165,7 @@ export const buildLinearSDXLTextToImageGraph = ( }, { source: { - node_id: SDXL_MODEL_LOADER, + node_id: modelLoaderNodeId, field: 'clip2', }, destination: { @@ -244,16 +252,23 @@ export const buildLinearSDXLTextToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // Add Refiner if enabled if (shouldUseSDXLRefiner) { addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + modelLoaderNodeId = REFINER_SEAMLESS; } // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + addVAEToGraph(state, graph, modelLoaderNodeId); // add LoRA support - addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // add controlnet, mutating `graph` addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts index 99a1ec7420..d07534bdd9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts @@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { @@ -22,6 +23,7 @@ import { NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, + SEAMLESS, TEXT_TO_IMAGE_GRAPH, } from './constants'; @@ -42,6 +44,8 @@ export const buildLinearTextToImageGraph = ( shouldUseCpuNoise, 
shouldUseNoiseSettings, vaePrecision, + seamlessXAxis, + seamlessYAxis, } = state.generation; const use_cpu = shouldUseNoiseSettings @@ -55,7 +59,7 @@ export const buildLinearTextToImageGraph = ( const isUsingOnnxModel = model.model_type === 'onnx'; - const modelLoaderNodeId = isUsingOnnxModel + let modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER; @@ -258,6 +262,12 @@ export const buildLinearTextToImageGraph = ( }, }); + // Add Seamless To Graph + if (seamlessXAxis || seamlessYAxis) { + addSeamlessToLinearGraph(state, graph, modelLoaderNodeId); + modelLoaderNodeId = SEAMLESS; + } + // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 6547d4a092..8ada7393dd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip'; export const IMAGE_TO_LATENTS = 'image_to_latents'; export const LATENTS_TO_LATENTS = 'latents_to_latents'; export const RESIZE = 'resize_image'; +export const IMG2IMG_RESIZE = 'img2img_resize'; export const CANVAS_OUTPUT = 'canvas_output'; export const INPAINT_IMAGE = 'inpaint_image'; export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image'; @@ -25,6 +26,7 @@ export const INPAINT_IMAGE_RESIZE_DOWN = 'inpaint_image_resize_down'; export const INPAINT_INFILL = 'inpaint_infill'; export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down'; export const INPAINT_FINAL_IMAGE = 'inpaint_final_image'; +export const INPAINT_CREATE_MASK = 'inpaint_create_mask'; export const CANVAS_COHERENCE_DENOISE_LATENTS = 'canvas_coherence_denoise_latents'; export const CANVAS_COHERENCE_NOISE = 'canvas_coherence_noise'; @@ -54,6 +56,8 @@ export const SDXL_REFINER_POSITIVE_CONDITIONING = export const SDXL_REFINER_NEGATIVE_CONDITIONING = 'sdxl_refiner_negative_conditioning'; export const SDXL_REFINER_DENOISE_LATENTS = 'sdxl_refiner_denoise_latents'; +export const SEAMLESS = 'seamless'; +export const REFINER_SEAMLESS = 'refiner_seamless'; // friendly graph ids export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph'; diff --git a/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts new file mode 100644 index 0000000000..6f7ac46f25 --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/hooks/usePreselectedImage.ts @@ -0,0 +1,81 @@ +import { skipToken } from '@reduxjs/toolkit/dist/query'; +import { t } from 'i18next'; +import { useCallback, useState } from 'react'; +import { useAppToaster } from '../../../app/components/Toaster'; +import { useAppDispatch } from '../../../app/store/storeHooks'; +import { + useGetImageDTOQuery, + useGetImageMetadataQuery, +} from '../../../services/api/endpoints/images'; +import { setInitialCanvasImage } from '../../canvas/store/canvasSlice'; +import { setActiveTab } from '../../ui/store/uiSlice'; +import { initialImageSelected } from '../store/actions'; +import { useRecallParameters } from './useRecallParameters'; + +type SelectedImage = { + imageName: string; + action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters'; +}; + +export const usePreselectedImage = () => { + const dispatch = useAppDispatch(); + const [imageNameForDto, setImageNameForDto] = useState(); + const 
[imageNameForMetadata, setImageNameForMetadata] = useState< + string | undefined + >(); + const { recallAllParameters } = useRecallParameters(); + const toaster = useAppToaster(); + + const { currentData: selectedImageDto } = useGetImageDTOQuery( + imageNameForDto ?? skipToken + ); + + const { currentData: selectedImageMetadata } = useGetImageMetadataQuery( + imageNameForMetadata ?? skipToken + ); + + const handlePreselectedImage = useCallback( + (selectedImage?: SelectedImage) => { + if (!selectedImage) { + return; + } + + if (selectedImage.action === 'sendToCanvas') { + setImageNameForDto(selectedImage?.imageName); + if (selectedImageDto) { + dispatch(setInitialCanvasImage(selectedImageDto)); + dispatch(setActiveTab('unifiedCanvas')); + toaster({ + title: t('toast.sentToUnifiedCanvas'), + status: 'info', + duration: 2500, + isClosable: true, + }); + } + } + + if (selectedImage.action === 'sendToImg2Img') { + setImageNameForDto(selectedImage?.imageName); + if (selectedImageDto) { + dispatch(initialImageSelected(selectedImageDto)); + } + } + + if (selectedImage.action === 'useAllParameters') { + setImageNameForMetadata(selectedImage?.imageName); + if (selectedImageMetadata) { + recallAllParameters(selectedImageMetadata.metadata); + } + } + }, + [ + dispatch, + selectedImageDto, + selectedImageMetadata, + recallAllParameters, + toaster, + ] + ); + + return { handlePreselectedImage }; +}; diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx index 4667ca63c0..2b40eca382 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx @@ -2,6 +2,7 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; +import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import { memo } from 'react'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; @@ -17,6 +18,7 @@ const SDXLImageToImageTabParameters = () => { + ); }; diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx index 084c12af61..ff47a42207 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx @@ -2,6 +2,7 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; +import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters'; 
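Note: the new usePreselectedImage hook above gates its RTK Query hooks with skipToken: no request fires until an action stores an image name, and the follow-up dispatch runs once currentData is available on a subsequent render. The idiom in isolation, as a hypothetical useGatedImageDto hook:

import { skipToken } from '@reduxjs/toolkit/dist/query';
import { useState } from 'react';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';

// Hypothetical distillation of the gating idiom; not part of this patch.
export const useGatedImageDto = () => {
  const [imageName, setImageName] = useState<string | undefined>();
  // With skipToken, RTK Query makes no request until a name is set.
  const { currentData } = useGetImageDTOQuery(imageName ?? skipToken);
  return { currentData, setImageName };
};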
import { memo } from 'react'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; @@ -17,6 +18,7 @@ const SDXLTextToImageTabParameters = () => { + ); }; diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx index 00432bcec8..8fc4a3181c 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx @@ -5,6 +5,7 @@ import ParamMaskAdjustmentCollapse from 'features/parameters/components/Paramete import ParamCanvasCoherencePassCollapse from 'features/parameters/components/Parameters/Canvas/SeamPainting/ParamCanvasCoherencePassCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; +import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; import SDXLUnifiedCanvasTabCoreParameters from './SDXLUnifiedCanvasTabCoreParameters'; @@ -22,6 +23,7 @@ export default function SDXLUnifiedCanvasTabParameters() { + ); } diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts index 6cff92a136..9bdc16ce77 100644 --- a/invokeai/frontend/web/src/features/system/store/configSlice.ts +++ b/invokeai/frontend/web/src/features/system/store/configSlice.ts @@ -9,7 +9,6 @@ export const initialConfigState: AppConfig = { disabledFeatures: ['lightbox', 'faceRestore', 'batches'], disabledSDFeatures: [ 'variation', - 'seamless', 'symmetry', 'hires', 'perlinNoise', diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx index 1c3d3cd270..a640e1bae4 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx @@ -6,6 +6,7 @@ import ParamMaskAdjustmentCollapse from 'features/parameters/components/Paramete import ParamCanvasCoherencePassCollapse from 'features/parameters/components/Parameters/Canvas/SeamPainting/ParamCanvasCoherencePassCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamPromptArea from 'features/parameters/components/Parameters/Prompt/ParamPromptArea'; +import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse'; import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse'; import { memo } from 'react'; import UnifiedCanvasCoreParameters from './UnifiedCanvasCoreParameters'; @@ -22,6 +23,7 @@ const UnifiedCanvasParameters = () => { + ); diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 516ee49d48..6dee0fdaa2 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1584,6 +1584,56 @@ export type components = { */ 
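Note: dropping 'seamless' from disabledSDFeatures in configSlice is what actually surfaces the ParamSeamlessCollapse panels registered above; a deployment that wants the feature hidden again can still override the list. Sketch only, assuming the disabledSDFeatures shape shown in the configSlice hunk:

// Hypothetical deployment override; not part of this patch.
const customConfig = {
  disabledSDFeatures: [
    'variation',
    'symmetry',
    'hires',
    'perlinNoise',
    'seamless', // re-hide the seamless collapse for this deployment
  ],
};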
refiner_start?: number; }; + /** + * Create Denoise Mask + * @description Creates mask for denoising model run. + */ + CreateDenoiseMaskInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. + * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default create_denoise_mask + * @enum {string} + */ + type: "create_denoise_mask"; + /** + * Vae + * @description VAE + */ + vae?: components["schemas"]["VaeField"]; + /** + * Image + * @description Image which will be masked + */ + image?: components["schemas"]["ImageField"]; + /** + * Mask + * @description The mask to use when pasting + */ + mask?: components["schemas"]["ImageField"]; + /** + * Tiled + * @description Processing using overlapping tiles (reduce memory consumption) + * @default false + */ + tiled?: boolean; + /** + * Fp32 + * @description Whether or not to use full float32 precision + * @default false + */ + fp32?: boolean; + }; /** * OpenCV Inpaint * @description Simple inpaint using opencv. @@ -1709,10 +1759,10 @@ export type components = { */ latents?: components["schemas"]["LatentsField"]; /** - * Mask + * Denoise Mask * @description The mask to use for the operation */ - mask?: components["schemas"]["ImageField"]; + denoise_mask?: components["schemas"]["DenoiseMaskField"]; /** * Positive Conditioning * @description Positive conditioning tensor @@ -1729,6 +1779,39 @@ export type components = { */ unet?: components["schemas"]["UNetField"]; }; + /** + * DenoiseMaskField + * @description An inpaint mask field + */ + DenoiseMaskField: { + /** + * Mask Name + * @description The name of the mask image + */ + mask_name: string; + /** + * Masked Latents Name + * @description The name of the masked image latents + */ + masked_latents_name?: string; + }; + /** + * DenoiseMaskOutput + * @description Base class for nodes that output a single image + */ + DenoiseMaskOutput: { + /** + * Type + * @default denoise_mask_output + * @enum {string} + */ + type: "denoise_mask_output"; + /** + * Denoise Mask + * @description Mask for denoise model run + */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + }; /** * Divide Integers * @description Divides two numbers @@ -2009,7 +2092,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | 
components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | 
components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + [key: string]: components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | 
components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; /** * Edges @@ -2052,7 +2135,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]; + [key: string]: components["schemas"]["BooleanOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["IntegerOutput"] | 
components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]; }; /** * Errors @@ -2912,6 +2995,11 @@ export type components = { * @enum {string} */ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + /** + * Metadata + * @description Optional core metadata to be written to image + */ + metadata?: components["schemas"]["CoreMetadata"]; }; /** * Image Saturation Adjustment @@ -5619,6 +5707,73 @@ export type components = { * @enum {string} */ SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; + /** + * Seamless + * @description Applies the seamless transformation to the Model UNet and VAE. + */ + SeamlessModeInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. 
     /**
      * Image Saturation Adjustment
@@ -5619,6 +5707,73 @@
      * @enum {string}
      */
     SchedulerPredictionType: "epsilon" | "v_prediction" | "sample";
+    /**
+     * Seamless
+     * @description Applies the seamless transformation to the Model UNet and VAE.
+     */
+    SeamlessModeInvocation: {
+      /**
+       * Id
+       * @description The id of this node. Must be unique among all nodes.
+       */
+      id: string;
+      /**
+       * Is Intermediate
+       * @description Whether or not this node is an intermediate node.
+       * @default false
+       */
+      is_intermediate?: boolean;
+      /**
+       * Type
+       * @default seamless
+       * @enum {string}
+       */
+      type: "seamless";
+      /**
+       * UNet
+       * @description UNet (scheduler, LoRAs)
+       */
+      unet?: components["schemas"]["UNetField"];
+      /**
+       * VAE
+       * @description VAE model to load
+       */
+      vae?: components["schemas"]["VaeField"];
+      /**
+       * Seamless Y
+       * @description Specify whether Y axis is seamless
+       * @default true
+       */
+      seamless_y?: boolean;
+      /**
+       * Seamless X
+       * @description Specify whether X axis is seamless
+       * @default true
+       */
+      seamless_x?: boolean;
+    };
+    /**
+     * SeamlessModeOutput
+     * @description Output of the seamless mode invocation: the modified UNet and VAE
+     */
+    SeamlessModeOutput: {
+      /**
+       * Type
+       * @default seamless_output
+       * @enum {string}
+       */
+      type: "seamless_output";
+      /**
+       * UNet
+       * @description UNet (scheduler, LoRAs)
+       */
+      unet?: components["schemas"]["UNetField"];
+      /**
+       * VAE
+       * @description VAE
+       */
+      vae?: components["schemas"]["VaeField"];
+    };
     /**
      * Segment Anything Processor
      * @description Applies segment anything processing to image
@@ -6108,6 +6263,11 @@
        * @description Loras to apply on model loading
        */
       loras: components["schemas"]["LoraInfo"][];
+      /**
+       * Seamless Axes
+       * @description Axes ("x" and "y") to which to apply seamless
+       */
+      seamless_axes?: string[];
     };
     /** Upscaler */
     Upscaler: {
@@ -6142,6 +6302,11 @@
        * @description Info to load vae submodel
        */
       vae: components["schemas"]["ModelInfo"];
+      /**
+       * Seamless Axes
+       * @description Axes ("x" and "y") to which to apply seamless
+       */
+      seamless_axes?: string[];
     };
     /**
      * VAE
@@ -6318,18 +6483,6 @@
       /** Ui Order */
       ui_order?: number;
     };
-    /**
-     * ControlNetModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    ControlNetModelFormat: "checkpoint" | "diffusers";
-    /**
-     * StableDiffusion1ModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
     /**
      * StableDiffusionOnnxModelFormat
      * @description An enumeration.
@@ -6342,6 +6495,18 @@
      * @enum {string}
      */
     StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
+    /**
+     * StableDiffusion1ModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
+    /**
+     * ControlNetModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    ControlNetModelFormat: "checkpoint" | "diffusers";
     /**
      * StableDiffusion2ModelFormat
      * @description An enumeration.
      * @enum {string}
      */
     StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
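`SeamlessModeInvocation` is a pass-through patch node: it takes the loader's `unet` and `vae` fields and emits copies tagged with `seamless_axes`, which the backend then honors when the model runs. A sketch of how a graph might route through it — the edge shape and the node ids are illustrative assumptions, not quoted from the schema:

```ts
// Hypothetical graph wiring; node ids and the Edge shape are assumptions.
interface Edge {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
}

const seamless = {
  id: 'seamless',
  type: 'seamless' as const,
  seamless_x: true, // tile horizontally
  seamless_y: false, // leave the vertical axis alone
};

const edges: Edge[] = [
  // Route the loader's UNet and VAE through the seamless node...
  { source: { node_id: 'model_loader', field: 'unet' }, destination: { node_id: seamless.id, field: 'unet' } },
  { source: { node_id: 'model_loader', field: 'vae' }, destination: { node_id: seamless.id, field: 'vae' } },
  // ...then hand the patched fields to denoising and decoding.
  { source: { node_id: seamless.id, field: 'unet' }, destination: { node_id: 'denoise', field: 'unet' } },
  { source: { node_id: seamless.id, field: 'vae' }, destination: { node_id: 'decode', field: 'vae' } },
];
```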
@@ -6464,7 +6629,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | 
components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] 
| components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -6505,7 +6670,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | 
components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | 
components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["BooleanInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | 
components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 4e30794a51..51e2459b7f 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -111,6 +111,7 @@ export type ImageBlurInvocation = s['ImageBlurInvocation']; export type ImageScaleInvocation = s['ImageScaleInvocation']; export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation']; export type InfillTileInvocation = s['InfillTileInvocation']; +export type CreateDenoiseMaskInvocation = s['CreateDenoiseMaskInvocation']; export type RandomIntInvocation = s['RandomIntInvocation']; export type CompelInvocation = s['CompelInvocation']; export type DynamicPromptInvocation = s['DynamicPromptInvocation']; @@ -129,6 +130,7 @@ export type ESRGANInvocation = s['ESRGANInvocation']; export type DivideInvocation = s['DivideInvocation']; export type ImageNSFWBlurInvocation = 
s['ImageNSFWBlurInvocation']; export type ImageWatermarkInvocation = s['ImageWatermarkInvocation']; +export type SeamlessModeInvocation = s['SeamlessModeInvocation']; // ControlNet Nodes export type ControlNetInvocation = s['ControlNetInvocation'];
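With the re-exports in place, feature code can name the new node types directly instead of indexing the generated schema everywhere. A short sketch, assuming `s` resolves to `components['schemas']` as the surrounding aliases suggest; the import path and `buildSeamlessNode` helper are hypothetical:

```ts
// Assumes the project's generated schema module; the import path mirrors the
// file layout in this diff but is not verified here.
import type { components } from 'services/api/schema';

type SeamlessModeInvocation = components['schemas']['SeamlessModeInvocation'];

// Hypothetical builder: id must be unique within the graph.
const buildSeamlessNode = (
  id: string,
  seamlessX = true,
  seamlessY = true
): SeamlessModeInvocation => ({
  id,
  type: 'seamless',
  is_intermediate: true,
  seamless_x: seamlessX,
  seamless_y: seamlessY,
});

const node = buildSeamlessNode('seamless_1', true, false);
```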