Merge branch 'main' into feat/nodes-phase-5

blessedcoolant
2023-08-29 12:05:28 +12:00
78 changed files with 2008 additions and 786 deletions

View File

@ -1,11 +1,11 @@
# Copyright (c) 2022-2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
import asyncio
from inspect import signature
import logging
import uvicorn
import socket
from inspect import signature
from pathlib import Path
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
@ -13,7 +13,6 @@ from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from pathlib import Path
from pydantic.schema import schema
from .services.config import InvokeAIAppConfig
@ -30,9 +29,12 @@ from .api.sockets import SocketIO
from .invocations.baseinvocation import BaseInvocation, _InputField, _OutputField, UIConfigBase
import torch
# noinspection PyUnresolvedReferences
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
if torch.backends.mps.is_available():
# noinspection PyUnresolvedReferences
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
@ -40,7 +42,6 @@ app_config = InvokeAIAppConfig.get_config()
app_config.parse_args()
logger = InvokeAILogger.getLogger(config=app_config)
# fix for windows mimetypes registry entries being borked
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
mimetypes.add_type("application/javascript", ".js")
@ -208,6 +209,17 @@ def invoke_api():
check_invokeai_root(app_config) # note, may exit with an exception if root not set up
if app_config.dev_reload:
try:
import jurigged
except ImportError as e:
logger.error(
'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.',
exc_info=e,
)
else:
jurigged.watch(logger=InvokeAILogger.getLogger(name="jurigged").info)
port = find_port(app_config.port)
if port != app_config.port:
logger.warn(f"Port {app_config.port} in use, using port {port}")

View File

@ -383,6 +383,9 @@ class ImageResizeInvocation(BaseInvocation):
width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)")
resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")
metadata: Optional[CoreMetadata] = InputField(
default=None, description=FieldDescriptions.core_metadata, ui_hidden=True
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get_pil_image(self.image.image_name)

View File

@ -21,6 +21,8 @@ from torchvision.transforms.functional import resize as tv_resize
from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.invocations.primitives import (
DenoiseMaskField,
DenoiseMaskOutput,
ImageField,
ImageOutput,
LatentsField,
@ -31,8 +33,9 @@ from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.models import ModelType, SilenceWarnings
from ...backend.model_management.models import BaseModelType
from ...backend.model_management.lora import ModelPatcher
from ...backend.model_management.seamless import set_seamless
from ...backend.model_management.models import BaseModelType
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
ConditioningData,
@ -44,16 +47,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import choose_precision, choose_torch_device
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
UIType,
tags,
title,
)
from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField
@ -64,6 +58,72 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device())
SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
@title("Create Denoise Mask")
@tags("mask", "denoise")
class CreateDenoiseMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""
# Metadata
type: Literal["create_denoise_mask"] = "create_denoise_mask"
# Inputs
vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
mask: ImageField = InputField(description="The mask to use for denoising", ui_order=2)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4)
def prep_mask_tensor(self, mask_image):
if mask_image.mode != "L":
mask_image = mask_image.convert("L")
mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
if mask_tensor.dim() == 3:
mask_tensor = mask_tensor.unsqueeze(0)
# if shape is not None:
# mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR)
return mask_tensor
@torch.no_grad()
def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
if self.image is not None:
image = context.services.images.get_pil_image(self.image.image_name)
image = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image.dim() == 3:
image = image.unsqueeze(0)
else:
image = None
mask = self.prep_mask_tensor(
context.services.images.get_pil_image(self.mask.image_name),
)
if image is not None:
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0)
# TODO:
masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
masked_latents_name = f"{context.graph_execution_state_id}__{self.id}_masked_latents"
context.services.latents.save(masked_latents_name, masked_latents)
else:
masked_latents_name = None
mask_name = f"{context.graph_execution_state_id}__{self.id}_mask"
context.services.latents.save(mask_name, mask)
return DenoiseMaskOutput(
denoise_mask=DenoiseMaskField(
mask_name=mask_name,
masked_latents_name=masked_latents_name,
),
)
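The masking math in CreateDenoiseMaskInvocation is compact; a self-contained torch sketch of the same steps (shapes hypothetical, F.interpolate standing in for the tv_resize helper):

import torch
import torch.nn.functional as F

image = torch.rand(1, 3, 512, 512)  # batched RGB image tensor
mask = torch.rand(1, 1, 64, 64)     # single-channel mask in [0, 1]

# Resize the mask to the image's spatial size, as the invocation does.
img_mask = F.interpolate(mask, size=image.shape[-2:], mode="bilinear", antialias=False)

# Zero the image where the mask is below 0.5, keep it elsewhere;
# the VAE then encodes this masked image.
masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0)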
def get_scheduler(
context: InvocationContext,
scheduler_info: ModelInfo,
@ -126,10 +186,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
control: Union[ControlField, list[ControlField]] = InputField(
default=None, description=FieldDescriptions.control, input=Input.Connection, ui_order=5
)
latents: Optional[LatentsField] = InputField(
description=FieldDescriptions.latents, input=Input.Connection, ui_order=4
)
mask: Optional[ImageField] = InputField(
latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection)
denoise_mask: Optional[DenoiseMaskField] = InputField(
default=None,
description=FieldDescriptions.mask,
)
@ -342,19 +400,18 @@ class DenoiseLatentsInvocation(BaseInvocation):
return num_inference_steps, timesteps, init_timestep
def prep_mask_tensor(self, mask, context, latents):
if mask is None:
return None
def prep_inpaint_mask(self, context, latents):
if self.denoise_mask is None:
return None, None
mask_image = context.services.images.get_pil_image(mask.image_name)
if mask_image.mode != "L":
# FIXME: why do we get passed an RGB image here? We can only use single-channel.
mask_image = mask_image.convert("L")
mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
if mask_tensor.dim() == 3:
mask_tensor = mask_tensor.unsqueeze(0)
mask_tensor = tv_resize(mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR)
return 1 - mask_tensor
mask = context.services.latents.get(self.denoise_mask.mask_name)
mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
if self.denoise_mask.masked_latents_name is not None:
masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name)
else:
masked_latents = None
return 1 - mask, masked_latents
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
@ -375,7 +432,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
if seed is None:
seed = 0
mask = self.prep_mask_tensor(self.mask, context, latents)
mask, masked_latents = self.prep_inpaint_mask(context, latents)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
@ -400,12 +457,14 @@ class DenoiseLatentsInvocation(BaseInvocation):
)
with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
unet_info.context.model, _lora_loader()
), unet_info as unet:
), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet:
latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None:
noise = noise.to(device=unet.device, dtype=unet.dtype)
if mask is not None:
mask = mask.to(device=unet.device, dtype=unet.dtype)
if masked_latents is not None:
masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype)
scheduler = get_scheduler(
context=context,
@ -442,6 +501,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
noise=noise,
seed=seed,
mask=mask,
masked_latents=masked_latents,
num_inference_steps=num_inference_steps,
conditioning_data=conditioning_data,
control_data=control_data, # list[ControlNetData]
@ -490,7 +550,7 @@ class LatentsToImageInvocation(BaseInvocation):
context=context,
)
with vae_info as vae:
with set_seamless(vae_info.context.model, self.vae.seamless_axes), vae_info as vae:
latents = latents.to(vae.device)
if self.fp32:
vae.to(dtype=torch.float32)
@ -664,26 +724,11 @@ class ImageToLatentsInvocation(BaseInvocation):
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
# image = context.services.images.get(
# self.image.image_type, self.image.image_name
# )
image = context.services.images.get_pil_image(self.image.image_name)
# vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
@staticmethod
def vae_encode(vae_info, upcast, tiled, image_tensor):
with vae_info as vae:
orig_dtype = vae.dtype
if self.fp32:
if upcast:
vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
@ -708,7 +753,7 @@ class ImageToLatentsInvocation(BaseInvocation):
vae.to(dtype=torch.float16)
# latents = latents.half()
if self.tiled:
if tiled:
vae.enable_tiling()
else:
vae.disable_tiling()
@ -722,6 +767,23 @@ class ImageToLatentsInvocation(BaseInvocation):
latents = vae.config.scaling_factor * latents
latents = latents.to(dtype=orig_dtype)
return latents
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
image = context.services.images.get_pil_image(self.image.image_name)
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor)
name = f"{context.graph_execution_state_id}__{self.id}"
latents = latents.to("cpu")
context.services.latents.save(name, latents)
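vae_encode is extracted as a staticmethod above so that CreateDenoiseMaskInvocation can reuse the encoder path without an ImageToLatentsInvocation instance; a toy sketch of the pattern (hypothetical classes):

class Encoder:
    @staticmethod
    def encode(value: int, upcast: bool) -> float:
        # Shared helper: callable as Encoder.encode(...) from anywhere,
        # or as self.encode(...) from inside the class.
        return float(value) * (2.0 if upcast else 1.0)

class OtherNode:
    def run(self) -> float:
        # Reuse without instantiating Encoder, mirroring how
        # CreateDenoiseMaskInvocation calls ImageToLatentsInvocation.vae_encode.
        return Encoder.encode(3, upcast=True)

assert OtherNode().run() == 6.0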

View File

@ -32,6 +32,7 @@ class CoreMetadata(BaseModelExcludeNull):
generation_mode: str = Field(
description="The generation mode that output this image",
)
created_by: Optional[str] = Field(description="The name of the creator of the image")
positive_prompt: str = Field(description="The positive prompt parameter")
negative_prompt: str = Field(description="The negative prompt parameter")
width: int = Field(description="The width parameter")

View File

@ -8,8 +8,8 @@ from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
Input,
InputField,
InvocationContext,
OutputField,
UIType,
@ -33,6 +33,7 @@ class UNetField(BaseModel):
unet: ModelInfo = Field(description="Info to load unet submodel")
scheduler: ModelInfo = Field(description="Info to load scheduler submodel")
loras: List[LoraInfo] = Field(description="Loras to apply on model loading")
seamless_axes: List[str] = Field(default_factory=list, description='Axes ("x" and "y") to which to apply seamless padding')
class ClipField(BaseModel):
@ -45,6 +46,7 @@ class ClipField(BaseModel):
class VaeField(BaseModel):
# TODO: better naming?
vae: ModelInfo = Field(description="Info to load vae submodel")
seamless_axes: List[str] = Field(default_factory=list, description='Axes ("x" and "y") to which to apply seamless padding')
class ModelLoaderOutput(BaseInvocationOutput):
@ -388,3 +390,50 @@ class VaeLoaderInvocation(BaseInvocation):
)
)
)
class SeamlessModeOutput(BaseInvocationOutput):
"""Modified Seamless Model output"""
type: Literal["seamless_output"] = "seamless_output"
# Outputs
unet: Optional[UNetField] = OutputField(description=FieldDescriptions.unet, title="UNet")
vae: Optional[VaeField] = OutputField(description=FieldDescriptions.vae, title="VAE")
@title("Seamless")
@tags("seamless", "model")
class SeamlessModeInvocation(BaseInvocation):
"""Applies the seamless transformation to the Model UNet and VAE."""
type: Literal["seamless"] = "seamless"
# Inputs
unet: Optional[UNetField] = InputField(
default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNet"
)
vae: Optional[VaeField] = InputField(
default=None, description=FieldDescriptions.vae_model, input=Input.Connection, title="VAE"
)
seamless_y: bool = InputField(default=True, input=Input.Any, description="Specify whether Y axis is seamless")
seamless_x: bool = InputField(default=True, input=Input.Any, description="Specify whether X axis is seamless")
def invoke(self, context: InvocationContext) -> SeamlessModeOutput:
# Conditionally append 'x' and 'y' based on seamless_x and seamless_y
unet = copy.deepcopy(self.unet)
vae = copy.deepcopy(self.vae)
seamless_axes_list = []
if self.seamless_x:
seamless_axes_list.append("x")
if self.seamless_y:
seamless_axes_list.append("y")
if unet is not None:
unet.seamless_axes = seamless_axes_list
if vae is not None:
vae.seamless_axes = seamless_axes_list
return SeamlessModeOutput(unet=unet, vae=vae)

View File

@ -294,6 +294,25 @@ class ImageCollectionInvocation(BaseInvocation):
return ImageCollectionOutput(collection=self.collection)
# endregion
# region DenoiseMask
class DenoiseMaskField(BaseModel):
"""An inpaint mask field"""
mask_name: str = Field(description="The name of the mask image")
masked_latents_name: Optional[str] = Field(description="The name of the masked image latents")
class DenoiseMaskOutput(BaseInvocationOutput):
"""Base class for nodes that output a single image"""
type: Literal["denoise_mask_output"] = "denoise_mask_output"
denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run")
# endregion
# region Latents

View File

@ -169,11 +169,13 @@ two configs are kept in separate sections of the config file:
"""
from __future__ import annotations
import os
from omegaconf import OmegaConf, DictConfig
from pathlib import Path
from typing import ClassVar, Dict, List, Literal, Union, get_type_hints, Optional
from omegaconf import OmegaConf, DictConfig
from pydantic import Field, parse_obj_as
from typing import ClassVar, Dict, List, Literal, Union, Optional, get_type_hints
from .base import InvokeAISettings
@ -233,6 +235,8 @@ class InvokeAIAppConfig(InvokeAISettings):
log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", category="Development")
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
# CACHE

View File

@ -20,7 +20,8 @@ def _conv_forward_asymmetric(self, input, weight, bias):
def configure_model_padding(model, seamless, seamless_axes):
"""
Modifies the 2D convolution layers to use a circular padding mode based on the `seamless` and `seamless_axes` options.
Modifies the 2D convolution layers to use a circular padding mode based on
the `seamless` and `seamless_axes` options.
"""
# TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
for m in model.modules():

View File

@ -50,7 +50,7 @@ from invokeai.frontend.install.model_install import addModelsForm, process_and_e
# TO DO - Move all the frontend code into invokeai.frontend.install
from invokeai.frontend.install.widgets import (
SingleSelectColumns,
SingleSelectColumnsSimple,
MultiSelectColumns,
CenteredButtonPress,
FileBox,
@ -354,7 +354,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
device = old_opts.device
attention_type = old_opts.attention_type
attention_slice_size = old_opts.attention_slice_size
self.nextrely += 1
self.add_widget_intelligent(
npyscreen.TitleFixedText,
@ -385,7 +384,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
)
self.nextrely -= 2
self.precision = self.add_widget_intelligent(
SingleSelectColumns,
SingleSelectColumnsSimple,
columns=len(PRECISION_CHOICES),
name="Precision",
values=PRECISION_CHOICES,
@ -406,10 +405,10 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
)
self.nextrely -= 2
self.device = self.add_widget_intelligent(
SingleSelectColumns,
SingleSelectColumnsSimple,
columns=len(DEVICE_CHOICES),
values=DEVICE_CHOICES,
value=DEVICE_CHOICES.index(device),
value=[DEVICE_CHOICES.index(device)],
begin_entry_at=3,
relx=30,
max_height=2,
@ -426,10 +425,10 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
)
self.nextrely -= 2
self.attention_type = self.add_widget_intelligent(
SingleSelectColumns,
SingleSelectColumnsSimple,
columns=len(ATTENTION_CHOICES),
values=ATTENTION_CHOICES,
value=ATTENTION_CHOICES.index(attention_type),
value=[ATTENTION_CHOICES.index(attention_type)],
begin_entry_at=3,
max_height=2,
relx=30,
@ -448,17 +447,16 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
)
self.nextrely -= 2
self.attention_slice_size = self.add_widget_intelligent(
SingleSelectColumns,
SingleSelectColumnsSimple,
columns=len(ATTENTION_SLICE_CHOICES),
values=ATTENTION_SLICE_CHOICES,
value=ATTENTION_SLICE_CHOICES.index(attention_slice_size),
value=[ATTENTION_SLICE_CHOICES.index(attention_slice_size)],
relx=30,
hidden=attention_type != "sliced",
max_height=2,
max_width=110,
scroll_exit=True,
)
self.add_widget_intelligent(
npyscreen.TitleFixedText,
name="Model RAM cache size (GB). Make this at least large enough to hold a single full model.",
@ -707,8 +705,6 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
path = dest / "core"
path.mkdir(parents=True, exist_ok=True)
maybe_create_models_yaml(root)
def maybe_create_models_yaml(root: Path):
models_yaml = root / "configs" / "models.yaml"

View File

@ -0,0 +1,103 @@
from __future__ import annotations
from contextlib import contextmanager
from typing import List, Union
import torch.nn as nn
from diffusers.models import AutoencoderKL, UNet2DConditionModel
def _conv_forward_asymmetric(self, input, weight, bias):
"""
Patch for Conv2d._conv_forward that supports asymmetric padding
"""
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
return nn.functional.conv2d(
working,
weight,
bias,
self.stride,
nn.modules.utils._pair(0),
self.dilation,
self.groups,
)
@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
try:
to_restore = []
for m_name, m in model.named_modules():
if isinstance(model, UNet2DConditionModel):
if ".attentions." in m_name:
continue
if ".resnets." in m_name:
if ".conv2" in m_name:
continue
if ".conv_shortcut" in m_name:
continue
"""
if isinstance(model, UNet2DConditionModel):
if False and ".upsamplers." in m_name:
continue
if False and ".downsamplers." in m_name:
continue
if True and ".resnets." in m_name:
if True and ".conv1" in m_name:
if False and "down_blocks" in m_name:
continue
if False and "mid_block" in m_name:
continue
if False and "up_blocks" in m_name:
continue
if True and ".conv2" in m_name:
continue
if True and ".conv_shortcut" in m_name:
continue
if True and ".attentions." in m_name:
continue
if False and m_name in ["conv_in", "conv_out"]:
continue
"""
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
print(f"applied - {m_name}")
m.asymmetric_padding_mode = {}
m.asymmetric_padding = {}
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
m.asymmetric_padding["x"] = (
m._reversed_padding_repeated_twice[0],
m._reversed_padding_repeated_twice[1],
0,
0,
)
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
m.asymmetric_padding["y"] = (
0,
0,
m._reversed_padding_repeated_twice[2],
m._reversed_padding_repeated_twice[3],
)
to_restore.append((m, m._conv_forward))
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
yield
finally:
for module, orig_conv_forward in to_restore:
module._conv_forward = orig_conv_forward
if hasattr(module, "asymmetric_padding_mode"):
del module.asymmetric_padding_mode
if hasattr(module, "asymmetric_padding"):
del module.asymmetric_padding
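A minimal usage sketch for set_seamless (assuming it is imported from invokeai.backend.model_management.seamless; a toy module tree stands in for a real diffusers UNet or VAE):

import torch
import torch.nn as nn

from invokeai.backend.model_management.seamless import set_seamless

model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3, padding=1))

with set_seamless(model, ["x"]):
    # Inside the context, Conv2d padding wraps circularly on the x axis,
    # so content leaving the right edge re-enters on the left.
    out = model(torch.randn(1, 3, 16, 16))
# On exit the stock Conv2d._conv_forward is restored.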

View File

@ -144,7 +144,7 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool = Tr
w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
transformation = T.Compose(
[
T.Resize((h, w), T.InterpolationMode.LANCZOS),
T.Resize((h, w), T.InterpolationMode.LANCZOS, antialias=True),
T.ToTensor(),
]
)
@ -358,6 +358,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
callback: Callable[[PipelineIntermediateState], None] = None,
control_data: List[ControlNetData] = None,
mask: Optional[torch.Tensor] = None,
masked_latents: Optional[torch.Tensor] = None,
seed: Optional[int] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if init_timestep.shape[0] == 0:
@ -376,28 +377,28 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
latents = self.scheduler.add_noise(latents, noise, batched_t)
if mask is not None:
# if no noise provided, noisify unmasked area based on seed (or 0 as fallback)
if noise is None:
noise = torch.randn(
orig_latents.shape,
dtype=torch.float32,
device="cpu",
generator=torch.Generator(device="cpu").manual_seed(seed or 0),
).to(device=orig_latents.device, dtype=orig_latents.dtype)
latents = self.scheduler.add_noise(latents, noise, batched_t)
latents = torch.lerp(
orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)
)
if is_inpainting_model(self.unet):
# You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint
# (that's why there's a mask!) but it seems to really want that blanked out.
# masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill
if masked_latents is None:
raise Exception("Source image required for inpaint mask when inpaint model used!")
# TODO: we should probably pass this in so we don't have to try/finally around setting it.
self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents)
self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(
self._unet_forward, mask, masked_latents
)
else:
# if no noise provided, noisify unmasked area based on seed (or 0 as fallback)
if noise is None:
noise = torch.randn(
orig_latents.shape,
dtype=torch.float32,
device="cpu",
generator=torch.Generator(device="cpu").manual_seed(seed or 0),
).to(device=orig_latents.device, dtype=orig_latents.dtype)
latents = self.scheduler.add_noise(latents, noise, batched_t)
latents = torch.lerp(
orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)
)
additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise))
try:
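For the non-inpainting branch above, the mask blend is a plain lerp between the original image latents and the working latents; conceptually (standalone torch sketch, shapes hypothetical):

import torch

orig_latents = torch.randn(1, 4, 64, 64)  # encoded init image
latents = torch.randn(1, 4, 64, 64)       # noised working latents
mask = torch.rand(1, 1, 64, 64)           # blend weight per latent pixel

# Where the weight is 0 the original latents are kept;
# where it is 1 the new latents are taken.
blended = torch.lerp(orig_latents, latents, mask)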

View File

@ -761,3 +761,18 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
diffusers.ControlNetModel = ControlNetModel
diffusers.models.controlnet.ControlNetModel = ControlNetModel
# patch LoRACompatibleConv to use the original Conv2d forward function
# this is needed to make the seamless patch work
# NOTE: with this patch, torch.compile crashes on torch 2.0 (already fixed in nightly)
# https://github.com/huggingface/diffusers/pull/4315
# https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/lora.py#L96C18-L96C18
def new_LoRACompatibleConv_forward(self, x):
if self.lora_layer is None:
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x)
else:
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x) + self.lora_layer(x)
diffusers.models.lora.LoRACompatibleConv.forward = new_LoRACompatibleConv_forward
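The override works because super() with explicit arguments can be called from a free function that is assigned onto the class afterwards; a toy sketch of the same pattern (toy classes, not the diffusers internals):

class Conv:
    def forward(self, x):
        return x + 1

class LoraConv(Conv):
    def forward(self, x):
        return super().forward(x) * 10  # behavior the hotfix wants to bypass

def new_forward(self, x):
    # Explicit super(...) skips LoraConv.forward and reaches Conv.forward,
    # just as new_LoRACompatibleConv_forward reaches Conv2d.forward.
    return super(LoraConv, self).forward(x)

LoraConv.forward = new_forward
assert LoraConv().forward(1) == 2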

View File

@ -177,6 +177,8 @@ class FloatTitleSlider(npyscreen.TitleText):
class SelectColumnBase:
"""Base class for selection widget arranged in columns."""
def make_contained_widgets(self):
self._my_widgets = []
column_width = self.width // self.columns
@ -253,6 +255,7 @@ class MultiSelectColumns(SelectColumnBase, npyscreen.MultiSelect):
class SingleSelectWithChanged(npyscreen.SelectOne):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.on_changed = None
def h_select(self, ch):
super().h_select(ch)
@ -260,7 +263,9 @@ class SingleSelectWithChanged(npyscreen.SelectOne):
self.on_changed(self.value)
class SingleSelectColumns(SelectColumnBase, SingleSelectWithChanged):
class SingleSelectColumnsSimple(SelectColumnBase, SingleSelectWithChanged):
"""Row of radio buttons. Spacebar to select."""
def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
self.columns = columns
self.value_cnt = len(values)
@ -268,12 +273,6 @@ class SingleSelectColumns(SelectColumnBase, SingleSelectWithChanged):
self.on_changed = None
super().__init__(screen, values=values, **keywords)
def when_value_edited(self):
self.h_select(self.cursor_line)
def when_cursor_moved(self):
self.h_select(self.cursor_line)
def h_cursor_line_right(self, ch):
self.h_exit_down("bye bye")
@ -281,6 +280,13 @@ class SingleSelectColumns(SelectColumnBase, SingleSelectWithChanged):
self.h_exit_up("bye bye")
class SingleSelectColumns(SingleSelectColumnsSimple):
"""Row of radio buttons. When tabbing over a selection, it is auto selected."""
def when_cursor_moved(self):
self.h_select(self.cursor_line)
class TextBoxInner(npyscreen.MultiLineEdit):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

View File

@ -14,6 +14,7 @@ import i18n from 'i18n';
import { size } from 'lodash-es';
import { ReactNode, memo, useCallback, useEffect } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage';
import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import GlobalHotkeys from './GlobalHotkeys';
import Toaster from './Toaster';
@ -23,13 +24,22 @@ const DEFAULT_CONFIG = {};
interface Props {
config?: PartialAppConfig;
headerComponent?: ReactNode;
selectedImage?: {
imageName: string;
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
}
const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
const App = ({
config = DEFAULT_CONFIG,
headerComponent,
selectedImage,
}: Props) => {
const language = useAppSelector(languageSelector);
const logger = useLogger('system');
const dispatch = useAppDispatch();
const { handlePreselectedImage } = usePreselectedImage();
const handleReset = useCallback(() => {
localStorage.clear();
location.reload();
@ -51,6 +61,10 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
dispatch(appStarted());
}, [dispatch]);
useEffect(() => {
handlePreselectedImage(selectedImage);
}, [handlePreselectedImage, selectedImage]);
return (
<ErrorBoundary
onReset={handleReset}

View File

@ -26,6 +26,10 @@ interface Props extends PropsWithChildren {
headerComponent?: ReactNode;
middleware?: Middleware[];
projectId?: string;
selectedImage?: {
imageName: string;
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
}
const InvokeAIUI = ({
@ -35,6 +39,7 @@ const InvokeAIUI = ({
headerComponent,
middleware,
projectId,
selectedImage,
}: Props) => {
useEffect(() => {
// configure API client token
@ -81,7 +86,11 @@ const InvokeAIUI = ({
<React.Suspense fallback={<Loading />}>
<ThemeLocaleProvider>
<AppDndContext>
<App config={config} headerComponent={headerComponent} />
<App
config={config}
headerComponent={headerComponent}
selectedImage={selectedImage}
/>
</AppDndContext>
</ThemeLocaleProvider>
</React.Suspense>

View File

@ -1,4 +1,4 @@
import { Flex, MenuItem, Spinner } from '@chakra-ui/react';
import { Flex, MenuItem, Spinner, Text } from '@chakra-ui/react';
import { useAppToaster } from 'app/components/Toaster';
import { useAppDispatch } from 'app/store/storeHooks';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
@ -249,6 +249,18 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
>
{t('gallery.deleteImage')}
</MenuItem>
{metadata?.created_by && (
<Flex
sx={{
padding: '5px 10px',
marginTop: '5px',
}}
>
<Text fontSize="xs" fontWeight="bold">
Created by {metadata?.created_by}
</Text>
</Flex>
)}
</>
);
};

View File

@ -8,7 +8,7 @@ import {
ImageDraggableData,
TypesafeDraggableData,
} from 'features/dnd/types';
import { useMultiselect } from 'features/gallery/hooks/useMultiselect.ts';
import { useMultiselect } from 'features/gallery/hooks/useMultiselect';
import { MouseEvent, memo, useCallback, useMemo, useState } from 'react';
import { FaTrash } from 'react-icons/fa';
import { MdStar, MdStarBorder } from 'react-icons/md';

View File

@ -69,6 +69,9 @@ const ImageMetadataActions = (props: Props) => {
return (
<>
{metadata.created_by && (
<ImageMetadataItem label="Created By" value={metadata.created_by} />
)}
{metadata.generation_mode && (
<ImageMetadataItem
label="Generation Mode"

View File

@ -10,6 +10,7 @@ import ColorInputField from './inputs/ColorInputField';
import ConditioningInputField from './inputs/ConditioningInputField';
import ControlInputField from './inputs/ControlInputField';
import ControlNetModelInputField from './inputs/ControlNetModelInputField';
import DenoiseMaskInputField from './inputs/DenoiseMaskInputField';
import EnumInputField from './inputs/EnumInputField';
import ImageCollectionInputField from './inputs/ImageCollectionInputField';
import ImageInputField from './inputs/ImageInputField';
@ -105,6 +106,19 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => {
);
}
if (
field?.type === 'DenoiseMaskField' &&
fieldTemplate?.type === 'DenoiseMaskField'
) {
return (
<DenoiseMaskInputField
nodeId={nodeId}
field={field}
fieldTemplate={fieldTemplate}
/>
);
}
if (
field?.type === 'ConditioningField' &&
fieldTemplate?.type === 'ConditioningField'

View File

@ -0,0 +1,17 @@
import {
DenoiseMaskInputFieldTemplate,
DenoiseMaskInputFieldValue,
FieldComponentProps,
} from 'features/nodes/types/types';
import { memo } from 'react';
const DenoiseMaskInputFieldComponent = (
_props: FieldComponentProps<
DenoiseMaskInputFieldValue,
DenoiseMaskInputFieldTemplate
>
) => {
return null;
};
export default memo(DenoiseMaskInputFieldComponent);

View File

@ -59,6 +59,11 @@ export const FIELDS: Record<FieldType, FieldUIConfig> = {
description: 'Images may be passed between nodes.',
color: 'purple.500',
},
DenoiseMaskField: {
title: 'Denoise Mask',
description: 'Denoise masks may be passed between nodes.',
color: 'red.700',
},
LatentsField: {
title: 'Latents',
description: 'Latents may be passed between nodes.',

View File

@ -65,6 +65,7 @@ export const zFieldType = z.enum([
'string',
'array',
'ImageField',
'DenoiseMaskField',
'LatentsField',
'ConditioningField',
'ControlField',
@ -129,6 +130,7 @@ export type InputFieldTemplate =
| StringInputFieldTemplate
| BooleanInputFieldTemplate
| ImageInputFieldTemplate
| DenoiseMaskInputFieldTemplate
| LatentsInputFieldTemplate
| ConditioningInputFieldTemplate
| UNetInputFieldTemplate
@ -214,6 +216,12 @@ export const zConditioningField = z.object({
});
export type ConditioningField = z.infer<typeof zConditioningField>;
export const zDenoiseMaskField = z.object({
mask_name: z.string().trim().min(1),
masked_latents_name: z.string().trim().min(1).optional(),
});
export type DenoiseMaskFieldValue = z.infer<typeof zDenoiseMaskField>;
export const zIntegerInputFieldValue = zInputFieldValueBase.extend({
type: z.literal('integer'),
value: z.number().optional(),
@ -250,6 +258,14 @@ export const zLatentsInputFieldValue = zInputFieldValueBase.extend({
});
export type LatentsInputFieldValue = z.infer<typeof zLatentsInputFieldValue>;
export const zDenoiseMaskInputFieldValue = zInputFieldValueBase.extend({
type: z.literal('DenoiseMaskField'),
value: zDenoiseMaskField.optional(),
});
export type DenoiseMaskInputFieldValue = z.infer<
typeof zDenoiseMaskInputFieldValue
>;
export const zConditioningInputFieldValue = zInputFieldValueBase.extend({
type: z.literal('ConditioningField'),
value: zConditioningField.optional(),
@ -468,6 +484,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [
zBooleanInputFieldValue,
zImageInputFieldValue,
zLatentsInputFieldValue,
zDenoiseMaskInputFieldValue,
zConditioningInputFieldValue,
zUNetInputFieldValue,
zClipInputFieldValue,
@ -541,6 +558,11 @@ export type ImageCollectionInputFieldTemplate = InputFieldTemplateBase & {
type: 'ImageCollection';
};
export type DenoiseMaskInputFieldTemplate = InputFieldTemplateBase & {
default: undefined;
type: 'DenoiseMaskField';
};
export type LatentsInputFieldTemplate = InputFieldTemplateBase & {
default: string;
type: 'LatentsField';

View File

@ -8,6 +8,7 @@ import {
ConditioningInputFieldTemplate,
ControlInputFieldTemplate,
ControlNetModelInputFieldTemplate,
DenoiseMaskInputFieldTemplate,
EnumInputFieldTemplate,
FieldType,
FloatInputFieldTemplate,
@ -262,6 +263,19 @@ const buildImageCollectionInputFieldTemplate = ({
return template;
};
const buildDenoiseMaskInputFieldTemplate = ({
schemaObject,
baseField,
}: BuildInputFieldArg): DenoiseMaskInputFieldTemplate => {
const template: DenoiseMaskInputFieldTemplate = {
...baseField,
type: 'DenoiseMaskField',
default: schemaObject.default ?? undefined,
};
return template;
};
const buildLatentsInputFieldTemplate = ({
schemaObject,
baseField,
@ -488,6 +502,12 @@ export const buildInputFieldTemplate = (
baseField,
});
}
if (fieldType === 'DenoiseMaskField') {
return buildDenoiseMaskInputFieldTemplate({
schemaObject: fieldSchema,
baseField,
});
}
if (fieldType === 'LatentsField') {
return buildLatentsInputFieldTemplate({
schemaObject: fieldSchema,

View File

@ -49,6 +49,10 @@ export const buildInputFieldValue = (
fieldValue.value = [];
}
if (template.type === 'DenoiseMaskField') {
fieldValue.value = undefined;
}
if (template.type === 'LatentsField') {
fieldValue.value = undefined;
}

View File

@ -63,7 +63,7 @@ export const addDynamicPromptsToGraph = (
{
source: {
node_id: DYNAMIC_PROMPT,
field: 'prompt_collection',
field: 'collection',
},
destination: {
node_id: ITERATE,

View File

@ -11,9 +11,11 @@ import {
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
REFINER_SEAMLESS,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
export const addSDXLLoRAsToGraph = (
@ -36,20 +38,25 @@ export const addSDXLLoRAsToGraph = (
| MetadataAccumulatorInvocation
| undefined;
// Handle Seamless Plugs
const unetLoaderId = modelLoaderNodeId;
let clipLoaderId = modelLoaderNodeId;
if ([SEAMLESS, REFINER_SEAMLESS].includes(modelLoaderNodeId)) {
clipLoaderId = SDXL_MODEL_LOADER;
}
if (loraCount > 0) {
// Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === modelLoaderNodeId &&
['unet'].includes(e.source.field)
e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === modelLoaderNodeId &&
['clip'].includes(e.source.field)
e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field)
) &&
!(
e.source.node_id === modelLoaderNodeId &&
e.source.node_id === clipLoaderId &&
['clip2'].includes(e.source.field)
)
);
@ -88,7 +95,7 @@ export const addSDXLLoRAsToGraph = (
// first lora = start the lora chain, attach directly to model loader
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
node_id: unetLoaderId,
field: 'unet',
},
destination: {
@ -99,7 +106,7 @@ export const addSDXLLoRAsToGraph = (
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
node_id: clipLoaderId,
field: 'clip',
},
destination: {
@ -110,7 +117,7 @@ export const addSDXLLoRAsToGraph = (
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
node_id: clipLoaderId,
field: 'clip2',
},
destination: {

View File

@ -1,11 +1,15 @@
import { RootState } from 'app/store/store';
import { MetadataAccumulatorInvocation } from 'services/api/types';
import {
MetadataAccumulatorInvocation,
SeamlessModeInvocation,
} from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
MASK_BLUR,
METADATA_ACCUMULATOR,
REFINER_SEAMLESS,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
@ -21,7 +25,8 @@ import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
export const addSDXLRefinerToGraph = (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
baseNodeId: string,
modelLoaderNodeId?: string
): void => {
const {
refinerModel,
@ -33,6 +38,8 @@ export const addSDXLRefinerToGraph = (
refinerStart,
} = state.sdxl;
const { seamlessXAxis, seamlessYAxis } = state.generation;
if (!refinerModel) {
return;
}
@ -53,6 +60,10 @@ export const addSDXLRefinerToGraph = (
metadataAccumulator.refiner_steps = refinerSteps;
}
const modelLoaderId = modelLoaderNodeId
? modelLoaderNodeId
: SDXL_MODEL_LOADER;
// Construct Style Prompt
const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } =
craftSDXLStylePrompt(state, true);
@ -65,10 +76,7 @@ export const addSDXLRefinerToGraph = (
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === SDXL_MODEL_LOADER &&
['vae'].includes(e.source.field)
)
!(e.source.node_id === modelLoaderId && ['vae'].includes(e.source.field))
);
graph.nodes[SDXL_REFINER_MODEL_LOADER] = {
@ -98,8 +106,39 @@ export const addSDXLRefinerToGraph = (
denoising_end: 1,
};
graph.edges.push(
{
// Add Seamless To Refiner
if (seamlessXAxis || seamlessYAxis) {
graph.nodes[REFINER_SEAMLESS] = {
id: REFINER_SEAMLESS,
type: 'seamless',
seamless_x: seamlessXAxis,
seamless_y: seamlessYAxis,
} as SeamlessModeInvocation;
graph.edges.push(
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: REFINER_SEAMLESS,
field: 'unet',
},
},
{
source: {
node_id: REFINER_SEAMLESS,
field: 'unet',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'unet',
},
}
);
} else {
graph.edges.push({
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'unet',
@ -108,7 +147,10 @@ export const addSDXLRefinerToGraph = (
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'unet',
},
},
});
}
graph.edges.push(
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,

View File

@ -0,0 +1,109 @@
import { RootState } from 'app/store/store';
import { SeamlessModeInvocation } from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
import {
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPAINT_GRAPH,
DENOISE_LATENTS,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_IMAGE_TO_IMAGE_GRAPH,
SDXL_TEXT_TO_IMAGE_GRAPH,
SEAMLESS,
} from './constants';
export const addSeamlessToLinearGraph = (
state: RootState,
graph: NonNullableGraph,
modelLoaderNodeId: string
): void => {
// Remove Existing UNet Connections
const { seamlessXAxis, seamlessYAxis } = state.generation;
graph.nodes[SEAMLESS] = {
id: SEAMLESS,
type: 'seamless',
seamless_x: seamlessXAxis,
seamless_y: seamlessYAxis,
} as SeamlessModeInvocation;
let denoisingNodeId = DENOISE_LATENTS;
if (
graph.id === SDXL_TEXT_TO_IMAGE_GRAPH ||
graph.id === SDXL_IMAGE_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_INPAINT_GRAPH ||
graph.id === SDXL_CANVAS_OUTPAINT_GRAPH
) {
denoisingNodeId = SDXL_DENOISE_LATENTS;
}
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === modelLoaderNodeId &&
['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === modelLoaderNodeId &&
['vae'].includes(e.source.field)
)
);
graph.edges.push(
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SEAMLESS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'vae',
},
destination: {
node_id: SEAMLESS,
field: 'vae',
},
},
{
source: {
node_id: SEAMLESS,
field: 'unet',
},
destination: {
node_id: denoisingNodeId,
field: 'unet',
},
}
);
if (
graph.id === CANVAS_INPAINT_GRAPH ||
graph.id === CANVAS_OUTPAINT_GRAPH ||
graph.id === SDXL_CANVAS_INPAINT_GRAPH ||
graph.id === SDXL_CANVAS_OUTPAINT_GRAPH
) {
graph.edges.push({
source: {
node_id: SEAMLESS,
field: 'unet',
},
destination: {
node_id: CANVAS_COHERENCE_DENOISE_LATENTS,
field: 'unet',
},
});
}
};

View File

@ -9,6 +9,7 @@ import {
CANVAS_TEXT_TO_IMAGE_GRAPH,
IMAGE_TO_IMAGE_GRAPH,
IMAGE_TO_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
@ -30,6 +31,11 @@ export const addVAEToGraph = (
modelLoaderNodeId: string = MAIN_MODEL_LOADER
): void => {
const { vae } = state.generation;
const { boundingBoxScaleMethod } = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
const isAutoVae = !vae;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
@ -76,7 +82,7 @@ export const addVAEToGraph = (
field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
},
destination: {
node_id: CANVAS_OUTPUT,
node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
field: 'vae',
},
});
@ -117,6 +123,16 @@ export const addVAEToGraph = (
field: 'vae',
},
},
{
source: {
node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'vae',
},
},
{
source: {
node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,

View File

@ -2,15 +2,12 @@ import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import {
ImageDTO,
ImageResizeInvocation,
ImageToLatentsInvocation,
} from 'services/api/types';
import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -19,12 +16,14 @@ import {
CLIP_SKIP,
DENOISE_LATENTS,
IMAGE_TO_LATENTS,
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RESIZE,
SEAMLESS,
} from './constants';
/**
@ -43,21 +42,34 @@ export const buildCanvasImageToImageGraph = (
scheduler,
steps,
img2imgStrength: strength,
vaePrecision,
clipSkip,
shouldUseCpuNoise,
shouldUseNoiseSettings,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { shouldAutoSave } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -75,9 +87,9 @@ export const buildCanvasImageToImageGraph = (
const graph: NonNullableGraph = {
id: CANVAS_IMAGE_TO_IMAGE_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
id: modelLoaderNodeId,
is_intermediate: true,
model,
},
@ -104,15 +116,17 @@ export const buildCanvasImageToImageGraph = (
id: NOISE,
is_intermediate: true,
use_cpu,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate: true,
// must be set manually later, bc `fit` parameter may require a resize node inserted
// image: {
// image_name: initialImage.image_name,
// },
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
@ -134,7 +148,7 @@ export const buildCanvasImageToImageGraph = (
// Connect Model Loader to CLIP Skip and UNet
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -144,7 +158,7 @@ export const buildCanvasImageToImageGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -214,82 +228,84 @@ export const buildCanvasImageToImageGraph = (
field: 'latents',
},
},
// Decode the denoised latents to an image
],
};
// Decode Latents To Image & Handle Scaled Before Processing
if (isUsingScaledDimensions) {
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate: true,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
};
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
graph.edges.push(
{
source: {
node_id: IMG2IMG_RESIZE,
field: 'image',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
},
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
// handle `fit`
if (initialImage.width !== width || initialImage.height !== height) {
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
// Create a resize node, explicitly setting its image
const resizeNode: ImageResizeInvocation = {
id: RESIZE,
type: 'img_resize',
image: {
image_name: initialImage.image_name,
},
is_intermediate: true,
width,
height,
};
graph.nodes[RESIZE] = resizeNode;
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
graph.edges.push({
source: { node_id: RESIZE, field: 'image' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
// The `RESIZE` node also passes its width and height to `NOISE`
graph.edges.push({
source: { node_id: RESIZE, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: RESIZE, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
image_name: initialImage.image_name,
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
};
// Pass the image's dimensions to the `NOISE` node
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
initialImage;
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
});
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
@ -300,8 +316,10 @@ export const buildCanvasImageToImageGraph = (
type: 'metadata_accumulator',
generation_mode: 'img2img',
cfg_scale,
height,
width,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
@ -328,11 +346,17 @@ export const buildCanvasImageToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// add LoRA support
addLoRAsToGraph(state, graph, DENOISE_LATENTS);
// optionally add custom VAE
addVAEToGraph(state, graph, MAIN_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);

View File

@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import {
CreateDenoiseMaskInvocation,
ImageBlurInvocation,
ImageDTO,
ImageToLatentsInvocation,
@ -12,16 +13,18 @@ import {
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPUT,
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_COHERENCE_NOISE,
CANVAS_COHERENCE_NOISE_INCREMENT,
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPUT,
CLIP_SKIP,
DENOISE_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
@ -36,6 +39,7 @@ import {
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
SEAMLESS,
} from './constants';
/**
@ -66,6 +70,8 @@ export const buildCanvasInpaintGraph = (
canvasCoherenceSteps,
canvasCoherenceStrength,
clipSkip,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
if (!model) {
@ -83,6 +89,8 @@ export const buildCanvasInpaintGraph = (
shouldAutoSave,
} = state.canvas;
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise; // assumes initialGenerationState is imported, as in buildCanvasImageToImageGraph
@ -90,9 +98,9 @@ export const buildCanvasInpaintGraph = (
const graph: NonNullableGraph = {
id: CANVAS_INPAINT_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
id: modelLoaderNodeId,
is_intermediate: true,
model,
},
@ -127,6 +135,12 @@ export const buildCanvasInpaintGraph = (
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
},
[NOISE]: {
type: 'noise',
id: NOISE,
@ -196,7 +210,7 @@ export const buildCanvasInpaintGraph = (
// Connect Model Loader to CLIP Skip and UNet
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -206,7 +220,7 @@ export const buildCanvasInpaintGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -276,16 +290,27 @@ export const buildCanvasInpaintGraph = (
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: MASK_BLUR,
field: 'image',
},
destination: {
node_id: DENOISE_LATENTS,
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Iterate
{
source: {
@ -330,7 +355,7 @@ export const buildCanvasInpaintGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -459,6 +484,16 @@ export const buildCanvasInpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Color Correct The Inpainted Result
{
source: {
@ -516,6 +551,10 @@ export const buildCanvasInpaintGraph = (
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
image: canvasMaskImage,
};
graph.nodes[INPAINT_CREATE_MASK] = {
...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation),
image: canvasInitImage,
};
graph.edges.push(
// Color Correct The Inpainted Result
@ -562,11 +601,17 @@ export const buildCanvasInpaintGraph = (
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add VAE
addVAEToGraph(state, graph, MAIN_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER);
addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);

View File

@ -14,16 +14,18 @@ import {
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPAINT_GRAPH,
CANVAS_OUTPUT,
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_COHERENCE_NOISE,
CANVAS_COHERENCE_NOISE_INCREMENT,
CANVAS_OUTPAINT_GRAPH,
CANVAS_OUTPUT,
CLIP_SKIP,
DENOISE_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
@ -42,6 +44,7 @@ import {
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
SEAMLESS,
} from './constants';
/**
@ -74,6 +77,8 @@ export const buildCanvasOutpaintGraph = (
tileSize,
infillMethod,
clipSkip,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
if (!model) {
@ -91,6 +96,8 @@ export const buildCanvasOutpaintGraph = (
shouldAutoSave,
} = state.canvas;
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise; // assumes initialGenerationState is imported, as in buildCanvasImageToImageGraph
@ -98,9 +105,9 @@ export const buildCanvasOutpaintGraph = (
const graph: NonNullableGraph = {
id: CANVAS_OUTPAINT_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
id: modelLoaderNodeId,
is_intermediate: true,
model,
},
@ -153,6 +160,12 @@ export const buildCanvasOutpaintGraph = (
use_cpu,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
@ -215,7 +228,7 @@ export const buildCanvasOutpaintGraph = (
// Connect Model Loader To UNet & Clip Skip
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -225,7 +238,7 @@ export const buildCanvasOutpaintGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -317,16 +330,27 @@ export const buildCanvasOutpaintGraph = (
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: MASK_BLUR,
field: 'image',
},
destination: {
node_id: DENOISE_LATENTS,
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Iterate
{
source: {
@ -371,7 +395,7 @@ export const buildCanvasOutpaintGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -522,6 +546,16 @@ export const buildCanvasOutpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Take combined mask and resize and then blur
{
source: {
@ -640,6 +674,16 @@ export const buildCanvasOutpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Color Correct The Inpainted Result
{
source: {
@ -694,11 +738,17 @@ export const buildCanvasOutpaintGraph = (
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add VAE
addVAEToGraph(state, graph, MAIN_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER);
addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);
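
Both canvas graphs now route the mask through the new create_denoise_mask node instead of feeding the blurred mask straight into the denoiser: MASK_BLUR supplies the mask input, the source image (the infill result for outpainting, the resized init image for inpainting) supplies the image input, and the node's denoise_mask output feeds DENOISE_LATENTS. A sketch of that plumbing with the same kind of simplified stand-in types (wireDenoiseMask is hypothetical):

```ts
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type Graph = { nodes: Record<string, Record<string, unknown>>; edges: Edge[] };

const INPAINT_CREATE_MASK = 'inpaint_create_mask';

// Hypothetical helper mirroring the three connections added above.
function wireDenoiseMask(
  graph: Graph,
  maskSourceId: string, // e.g. MASK_BLUR
  imageSourceId: string, // INPAINT_INFILL (outpaint) or the resized init image (inpaint)
  denoiseNodeId: string // DENOISE_LATENTS / SDXL_DENOISE_LATENTS
): void {
  graph.nodes[INPAINT_CREATE_MASK] = {
    id: INPAINT_CREATE_MASK,
    type: 'create_denoise_mask',
    is_intermediate: true,
  };
  graph.edges.push(
    // Blurred mask -> the node's mask input.
    {
      source: { node_id: maskSourceId, field: 'image' },
      destination: { node_id: INPAINT_CREATE_MASK, field: 'mask' },
    },
    // Source image -> the node's image input.
    {
      source: { node_id: imageSourceId, field: 'image' },
      destination: { node_id: INPAINT_CREATE_MASK, field: 'image' },
    },
    // Resulting denoise_mask -> the denoiser.
    {
      source: { node_id: INPAINT_CREATE_MASK, field: 'denoise_mask' },
      destination: { node_id: denoiseNodeId, field: 'denoise_mask' },
    }
  );
}
```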

View File

@ -2,29 +2,29 @@ import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import {
ImageDTO,
ImageResizeInvocation,
ImageToLatentsInvocation,
} from 'services/api/types';
import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPUT,
IMAGE_TO_LATENTS,
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RESIZE,
REFINER_SEAMLESS,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -47,6 +47,8 @@ export const buildCanvasSDXLImageToImageGraph = (
clipSkip,
shouldUseCpuNoise,
shouldUseNoiseSettings,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const {
@ -59,13 +61,24 @@ export const buildCanvasSDXLImageToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { shouldAutoSave } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// Model Loader ID
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -87,9 +100,9 @@ export const buildCanvasSDXLImageToImageGraph = (
const graph: NonNullableGraph = {
id: SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
nodes: {
[SDXL_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
id: modelLoaderNodeId,
model,
},
[POSITIVE_CONDITIONING]: {
@ -109,16 +122,18 @@ export const buildCanvasSDXLImageToImageGraph = (
id: NOISE,
is_intermediate: true,
use_cpu,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
// must be set manually later, bc `fit` parameter may require a resize node inserted
// image: {
// image_name: initialImage.image_name,
// },
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
@ -132,18 +147,12 @@ export const buildCanvasSDXLImageToImageGraph = (
: 1 - strength,
denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
},
[CANVAS_OUTPUT]: {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
},
},
edges: [
// Connect Model Loader To UNet & CLIP
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -153,7 +162,7 @@ export const buildCanvasSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -163,7 +172,7 @@ export const buildCanvasSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -173,7 +182,7 @@ export const buildCanvasSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -183,7 +192,7 @@ export const buildCanvasSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -232,82 +241,84 @@ export const buildCanvasSDXLImageToImageGraph = (
field: 'latents',
},
},
// Decode denoised latents to an image
],
};
// Decode Latents To Image & Handle Scaled Before Processing
if (isUsingScaledDimensions) {
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate: true,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
};
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
graph.edges.push(
{
source: {
node_id: IMG2IMG_RESIZE,
field: 'image',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
},
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
// handle `fit`
if (initialImage.width !== width || initialImage.height !== height) {
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
// Create a resize node, explicitly setting its image
const resizeNode: ImageResizeInvocation = {
id: RESIZE,
type: 'img_resize',
image: {
image_name: initialImage.image_name,
},
is_intermediate: true,
width,
height,
};
graph.nodes[RESIZE] = resizeNode;
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
graph.edges.push({
source: { node_id: RESIZE, field: 'image' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
// The `RESIZE` node also passes its width and height to `NOISE`
graph.edges.push({
source: { node_id: RESIZE, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: RESIZE, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
image_name: initialImage.image_name,
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
};
// Pass the image's dimensions to the `NOISE` node
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
initialImage;
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
});
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
@ -318,8 +329,10 @@ export const buildCanvasSDXLImageToImageGraph = (
type: 'metadata_accumulator',
generation_mode: 'img2img',
cfg_scale,
height,
width,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
@ -346,16 +359,23 @@ export const buildCanvasSDXLImageToImageGraph = (
},
});
// add LoRA support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, SDXL_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
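
When the bounding box is scaled before processing, the output path splits in two: denoising happens at the scaled size, LATENTS_TO_IMAGE decodes it, and CANVAS_OUTPUT becomes an img_resize back down to the bounding-box size; without scaling, CANVAS_OUTPUT is the l2i decode itself. A condensed sketch of just that branch, again with simplified stand-in types (wireDecode is hypothetical; constant names mirror the ones above):

```ts
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type Graph = { nodes: Record<string, Record<string, unknown>>; edges: Edge[] };

const LATENTS_TO_IMAGE = 'latents_to_image';
const CANVAS_OUTPUT = 'canvas_output';

// Sketch of the two decode paths; `denoiseId` is the denoiser node's id.
function wireDecode(
  graph: Graph,
  denoiseId: string,
  isUsingScaledDimensions: boolean,
  width: number, // bounding-box (final) dimensions
  height: number
): void {
  if (isUsingScaledDimensions) {
    // Decode the scaled latents, then resize the image back down.
    graph.nodes[LATENTS_TO_IMAGE] = {
      id: LATENTS_TO_IMAGE,
      type: 'l2i',
      is_intermediate: true,
    };
    graph.nodes[CANVAS_OUTPUT] = {
      id: CANVAS_OUTPUT,
      type: 'img_resize',
      width,
      height,
    };
    graph.edges.push(
      {
        source: { node_id: denoiseId, field: 'latents' },
        destination: { node_id: LATENTS_TO_IMAGE, field: 'latents' },
      },
      {
        source: { node_id: LATENTS_TO_IMAGE, field: 'image' },
        destination: { node_id: CANVAS_OUTPUT, field: 'image' },
      }
    );
  } else {
    // No scaling: the canvas output node is the decode itself.
    graph.nodes[CANVAS_OUTPUT] = { id: CANVAS_OUTPUT, type: 'l2i' };
    graph.edges.push({
      source: { node_id: denoiseId, field: 'latents' },
      destination: { node_id: CANVAS_OUTPUT, field: 'latents' },
    });
  }
}
```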

View File

@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import {
CreateDenoiseMaskInvocation,
ImageBlurInvocation,
ImageDTO,
ImageToLatentsInvocation,
@ -13,13 +14,15 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPUT,
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_COHERENCE_NOISE,
CANVAS_COHERENCE_NOISE_INCREMENT,
CANVAS_OUTPUT,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
@ -33,9 +36,11 @@ import {
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
REFINER_SEAMLESS,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -65,6 +70,8 @@ export const buildCanvasSDXLInpaintGraph = (
maskBlurMethod,
canvasCoherenceSteps,
canvasCoherenceStrength,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const {
@ -89,6 +96,8 @@ export const buildCanvasSDXLInpaintGraph = (
shouldAutoSave,
} = state.canvas;
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -100,9 +109,9 @@ export const buildCanvasSDXLInpaintGraph = (
const graph: NonNullableGraph = {
id: SDXL_CANVAS_INPAINT_GRAPH,
nodes: {
[SDXL_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
id: modelLoaderNodeId,
model,
},
[POSITIVE_CONDITIONING]: {
@ -136,6 +145,12 @@ export const buildCanvasSDXLInpaintGraph = (
use_cpu,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
@ -201,7 +216,7 @@ export const buildCanvasSDXLInpaintGraph = (
// Connect Model Loader to UNet and CLIP
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -211,7 +226,7 @@ export const buildCanvasSDXLInpaintGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -221,7 +236,7 @@ export const buildCanvasSDXLInpaintGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -231,7 +246,7 @@ export const buildCanvasSDXLInpaintGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -241,7 +256,7 @@ export const buildCanvasSDXLInpaintGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -290,16 +305,27 @@ export const buildCanvasSDXLInpaintGraph = (
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: MASK_BLUR,
field: 'image',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Iterate
{
source: {
@ -344,7 +370,7 @@ export const buildCanvasSDXLInpaintGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -473,6 +499,16 @@ export const buildCanvasSDXLInpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Color Correct The Inpainted Result
{
source: {
@ -530,6 +566,10 @@ export const buildCanvasSDXLInpaintGraph = (
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
image: canvasMaskImage,
};
graph.nodes[INPAINT_CREATE_MASK] = {
...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation),
image: canvasInitImage,
};
graph.edges.push(
// Color Correct The Inpainted Result
@ -576,16 +616,28 @@ export const buildCanvasSDXLInpaintGraph = (
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, CANVAS_COHERENCE_DENOISE_LATENTS);
addSDXLRefinerToGraph(
state,
graph,
CANVAS_COHERENCE_DENOISE_LATENTS,
modelLoaderNodeId
);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, SDXL_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER);
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
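
Helper order matters here: seamless runs first and replaces the id, the refiner (which now also receives modelLoaderNodeId) replaces it again with REFINER_SEAMLESS, and only then do the VAE and LoRA helpers attach. A plain-function sketch of that handoff, mirroring the branches above (the function itself is illustrative, not app code; the string values are the constants from this PR):

```ts
// Illustrative only: mirrors how the builders reassign modelLoaderNodeId
// so each later helper attaches to whatever node currently fronts the model.
function resolveModelSourceId(
  seamlessXAxis: boolean,
  seamlessYAxis: boolean,
  shouldUseSDXLRefiner: boolean
): string {
  let modelLoaderNodeId = 'sdxl_model_loader'; // SDXL_MODEL_LOADER
  if (seamlessXAxis || seamlessYAxis) {
    // addSeamlessToLinearGraph inserted SEAMLESS in front of the loader.
    modelLoaderNodeId = 'seamless'; // SEAMLESS
  }
  if (shouldUseSDXLRefiner) {
    // After addSDXLRefinerToGraph, later helpers attach to REFINER_SEAMLESS.
    modelLoaderNodeId = 'refiner_seamless'; // REFINER_SEAMLESS
  }
  // addVAEToGraph / addSDXLLoRAsToGraph then receive this id.
  return modelLoaderNodeId;
}
```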

View File

@ -15,13 +15,15 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPUT,
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_COHERENCE_NOISE,
CANVAS_COHERENCE_NOISE_INCREMENT,
CANVAS_OUTPUT,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
@ -39,9 +41,11 @@ import {
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
REFINER_SEAMLESS,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -73,6 +77,8 @@ export const buildCanvasSDXLOutpaintGraph = (
canvasCoherenceStrength,
tileSize,
infillMethod,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const {
@ -97,6 +103,8 @@ export const buildCanvasSDXLOutpaintGraph = (
shouldAutoSave,
} = state.canvas;
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -156,6 +164,12 @@ export const buildCanvasSDXLOutpaintGraph = (
use_cpu,
is_intermediate: true,
},
[INPAINT_CREATE_MASK]: {
type: 'create_denoise_mask',
id: INPAINT_CREATE_MASK,
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
@ -331,16 +345,27 @@ export const buildCanvasSDXLOutpaintGraph = (
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: MASK_BLUR,
field: 'image',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Iterate
{
source: {
@ -537,6 +562,16 @@ export const buildCanvasSDXLOutpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Take combined mask and resize and then blur
{
source: {
@ -655,6 +690,16 @@ export const buildCanvasSDXLOutpaintGraph = (
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Color Correct The Inpainted Result
{
source: {
@ -709,16 +754,28 @@ export const buildCanvasSDXLOutpaintGraph = (
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, CANVAS_COHERENCE_DENOISE_LATENTS);
addSDXLRefinerToGraph(
state,
graph,
CANVAS_COHERENCE_DENOISE_LATENTS,
modelLoaderNodeId
);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, SDXL_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER);
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

View File

@ -11,18 +11,22 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
ONNX_MODEL_LOADER,
POSITIVE_CONDITIONING,
REFINER_SEAMLESS,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -44,12 +48,22 @@ export const buildCanvasSDXLTextToImageGraph = (
clipSkip,
shouldUseCpuNoise,
shouldUseNoiseSettings,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { shouldAutoSave } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } =
state.sdxl;
@ -65,7 +79,7 @@ export const buildCanvasSDXLTextToImageGraph = (
const isUsingOnnxModel = model.model_type === 'onnx';
const modelLoaderNodeId = isUsingOnnxModel
let modelLoaderNodeId = isUsingOnnxModel
? ONNX_MODEL_LOADER
: SDXL_MODEL_LOADER;
@ -136,17 +150,15 @@ export const buildCanvasSDXLTextToImageGraph = (
type: 'noise',
id: NOISE,
is_intermediate: true,
width,
height,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
use_cpu,
},
[t2lNode.id]: t2lNode,
[CANVAS_OUTPUT]: {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
},
},
edges: [
// Connect Model Loader to UNet and CLIP
@ -231,19 +243,67 @@ export const buildCanvasSDXLTextToImageGraph = (
field: 'noise',
},
},
// Decode Denoised Latents To Image
],
};
// Decode Latents To Image & Handle Scaled Before Processing
if (isUsingScaledDimensions) {
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
graph.edges.push(
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.edges.push({
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
@ -251,8 +311,10 @@ export const buildCanvasSDXLTextToImageGraph = (
type: 'metadata_accumulator',
generation_mode: 'txt2img',
cfg_scale,
height,
width,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
@ -277,9 +339,16 @@ export const buildCanvasSDXLTextToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// add LoRA support

View File

@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -17,12 +18,14 @@ import {
CANVAS_TEXT_TO_IMAGE_GRAPH,
CLIP_SKIP,
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
ONNX_MODEL_LOADER,
POSITIVE_CONDITIONING,
SEAMLESS,
} from './constants';
/**
@ -39,15 +42,26 @@ export const buildCanvasTextToImageGraph = (
cfgScale: cfg_scale,
scheduler,
steps,
vaePrecision,
clipSkip,
shouldUseCpuNoise,
shouldUseNoiseSettings,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { shouldAutoSave } = state.canvas;
const {
scaledBoundingBoxDimensions,
boundingBoxScaleMethod,
shouldAutoSave,
} = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(
boundingBoxScaleMethod
);
if (!model) {
log.error('No model found in state');
@ -60,7 +74,7 @@ export const buildCanvasTextToImageGraph = (
const isUsingOnnxModel = model.model_type === 'onnx';
const modelLoaderNodeId = isUsingOnnxModel
let modelLoaderNodeId = isUsingOnnxModel
? ONNX_MODEL_LOADER
: MAIN_MODEL_LOADER;
@ -131,16 +145,15 @@ export const buildCanvasTextToImageGraph = (
type: 'noise',
id: NOISE,
is_intermediate: true,
width,
height,
width: !isUsingScaledDimensions
? width
: scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
use_cpu,
},
[t2lNode.id]: t2lNode,
[CANVAS_OUTPUT]: {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
},
},
edges: [
// Connect Model Loader to UNet & CLIP Skip
@ -216,19 +229,67 @@ export const buildCanvasTextToImageGraph = (
field: 'noise',
},
},
// Decode denoised latents to image
],
};
// Decode Latents To Image & Handle Scaled Before Processing
if (isUsingScaledDimensions) {
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
is_intermediate: true,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: !shouldAutoSave,
width: width,
height: height,
};
graph.edges.push(
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: !shouldAutoSave,
fp32: vaePrecision === 'fp32' ? true : false,
};
graph.edges.push({
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
@ -236,8 +297,10 @@ export const buildCanvasTextToImageGraph = (
type: 'metadata_accumulator',
generation_mode: 'txt2img',
cfg_scale,
height,
width,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions
? height
: scaledBoundingBoxDimensions.height,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
@ -262,6 +325,12 @@ export const buildCanvasTextToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, modelLoaderNodeId);
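
The same isUsingScaledDimensions flag also sizes the NOISE node and the metadata accumulator, so noise is generated (and dimensions are recorded) at whichever size is actually denoised. A small stand-alone sketch of that selection (names are stand-ins for the state fields used above):

```ts
type Dimensions = { width: number; height: number };

// Sketch: pick the generation dimensions once; the builders above do the
// same selection inline for both the noise node and the metadata.
function pickGenerationDimensions(
  boundingBox: Dimensions,
  scaled: Dimensions,
  boundingBoxScaleMethod: 'none' | 'auto' | 'manual'
): Dimensions {
  const isUsingScaledDimensions = ['auto', 'manual'].includes(
    boundingBoxScaleMethod
  );
  return isUsingScaledDimensions ? scaled : boundingBox;
}

// Example: a 512x512 bounding box scaled up for generation.
const dims = pickGenerationDimensions(
  { width: 512, height: 512 },
  { width: 1024, height: 1024 },
  'auto'
); // -> { width: 1024, height: 1024 }
```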

View File

@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -24,6 +25,7 @@ import {
NOISE,
POSITIVE_CONDITIONING,
RESIZE,
SEAMLESS,
} from './constants';
/**
@ -49,6 +51,8 @@ export const buildLinearImageToImageGraph = (
shouldUseCpuNoise,
shouldUseNoiseSettings,
vaePrecision,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
// TODO: add batch functionality
@ -80,6 +84,8 @@ export const buildLinearImageToImageGraph = (
throw new Error('No model found in state');
}
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -88,9 +94,9 @@ export const buildLinearImageToImageGraph = (
const graph: NonNullableGraph = {
id: IMAGE_TO_IMAGE_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
id: modelLoaderNodeId,
model,
},
[CLIP_SKIP]: {
@ -141,7 +147,7 @@ export const buildLinearImageToImageGraph = (
// Connect Model Loader to UNet and CLIP Skip
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -151,7 +157,7 @@ export const buildLinearImageToImageGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -338,11 +344,17 @@ export const buildLinearImageToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, MAIN_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addLoRAsToGraph(state, graph, DENOISE_LATENTS);
addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);

View File

@ -11,6 +11,7 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -20,10 +21,12 @@ import {
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
REFINER_SEAMLESS,
RESIZE,
SDXL_DENOISE_LATENTS,
SDXL_IMAGE_TO_IMAGE_GRAPH,
SDXL_MODEL_LOADER,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -49,6 +52,8 @@ export const buildLinearSDXLImageToImageGraph = (
shouldUseCpuNoise,
shouldUseNoiseSettings,
vaePrecision,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const {
@ -79,6 +84,9 @@ export const buildLinearSDXLImageToImageGraph = (
throw new Error('No model found in state');
}
// Model Loader ID
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
@ -91,9 +99,9 @@ export const buildLinearSDXLImageToImageGraph = (
const graph: NonNullableGraph = {
id: SDXL_IMAGE_TO_IMAGE_GRAPH,
nodes: {
[SDXL_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
id: modelLoaderNodeId,
model,
},
[POSITIVE_CONDITIONING]: {
@ -143,7 +151,7 @@ export const buildLinearSDXLImageToImageGraph = (
// Connect Model Loader to UNet, CLIP & VAE
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -153,7 +161,7 @@ export const buildLinearSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -163,7 +171,7 @@ export const buildLinearSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -173,7 +181,7 @@ export const buildLinearSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -183,7 +191,7 @@ export const buildLinearSDXLImageToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -351,15 +359,23 @@ export const buildLinearSDXLImageToImageGraph = (
},
});
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, SDXL_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// Add LoRA Support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

View File

@ -7,6 +7,7 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -15,9 +16,11 @@ import {
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
REFINER_SEAMLESS,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SDXL_TEXT_TO_IMAGE_GRAPH,
SEAMLESS,
} from './constants';
import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt';
@ -38,6 +41,8 @@ export const buildLinearSDXLTextToImageGraph = (
shouldUseCpuNoise,
shouldUseNoiseSettings,
vaePrecision,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const {
@ -61,6 +66,9 @@ export const buildLinearSDXLTextToImageGraph = (
const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } =
craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt);
// Model Loader ID
let modelLoaderNodeId = SDXL_MODEL_LOADER;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
@ -74,9 +82,9 @@ export const buildLinearSDXLTextToImageGraph = (
const graph: NonNullableGraph = {
id: SDXL_TEXT_TO_IMAGE_GRAPH,
nodes: {
[SDXL_MODEL_LOADER]: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
id: modelLoaderNodeId,
model,
},
[POSITIVE_CONDITIONING]: {
@ -117,7 +125,7 @@ export const buildLinearSDXLTextToImageGraph = (
// Connect Model Loader to UNet, VAE & CLIP
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
@ -127,7 +135,7 @@ export const buildLinearSDXLTextToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -137,7 +145,7 @@ export const buildLinearSDXLTextToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -147,7 +155,7 @@ export const buildLinearSDXLTextToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
@ -157,7 +165,7 @@ export const buildLinearSDXLTextToImageGraph = (
},
{
source: {
node_id: SDXL_MODEL_LOADER,
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
@ -244,16 +252,23 @@ export const buildLinearSDXLTextToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS);
modelLoaderNodeId = REFINER_SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, SDXL_MODEL_LOADER);
addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER);
addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);

View File

@ -10,6 +10,7 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
@ -22,6 +23,7 @@ import {
NOISE,
ONNX_MODEL_LOADER,
POSITIVE_CONDITIONING,
SEAMLESS,
TEXT_TO_IMAGE_GRAPH,
} from './constants';
@ -42,6 +44,8 @@ export const buildLinearTextToImageGraph = (
shouldUseCpuNoise,
shouldUseNoiseSettings,
vaePrecision,
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const use_cpu = shouldUseNoiseSettings
@ -55,7 +59,7 @@ export const buildLinearTextToImageGraph = (
const isUsingOnnxModel = model.model_type === 'onnx';
const modelLoaderNodeId = isUsingOnnxModel
let modelLoaderNodeId = isUsingOnnxModel
? ONNX_MODEL_LOADER
: MAIN_MODEL_LOADER;
@ -258,6 +262,12 @@ export const buildLinearTextToImageGraph = (
},
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// optionally add custom VAE
addVAEToGraph(state, graph, modelLoaderNodeId);

View File

@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip';
export const IMAGE_TO_LATENTS = 'image_to_latents';
export const LATENTS_TO_LATENTS = 'latents_to_latents';
export const RESIZE = 'resize_image';
export const IMG2IMG_RESIZE = 'img2img_resize';
export const CANVAS_OUTPUT = 'canvas_output';
export const INPAINT_IMAGE = 'inpaint_image';
export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image';
@ -25,6 +26,7 @@ export const INPAINT_IMAGE_RESIZE_DOWN = 'inpaint_image_resize_down';
export const INPAINT_INFILL = 'inpaint_infill';
export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down';
export const INPAINT_FINAL_IMAGE = 'inpaint_final_image';
export const INPAINT_CREATE_MASK = 'inpaint_create_mask';
export const CANVAS_COHERENCE_DENOISE_LATENTS =
'canvas_coherence_denoise_latents';
export const CANVAS_COHERENCE_NOISE = 'canvas_coherence_noise';
@ -54,6 +56,8 @@ export const SDXL_REFINER_POSITIVE_CONDITIONING =
export const SDXL_REFINER_NEGATIVE_CONDITIONING =
'sdxl_refiner_negative_conditioning';
export const SDXL_REFINER_DENOISE_LATENTS = 'sdxl_refiner_denoise_latents';
export const SEAMLESS = 'seamless';
export const REFINER_SEAMLESS = 'refiner_seamless';
// friendly graph ids
export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph';
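
Node ids must be unique within a graph, which is why the base pipeline and the refiner each get their own seamless id rather than sharing one. A tiny illustration of why that matters (the duplicate check is illustrative; the app does not necessarily guard this way):

```ts
const SEAMLESS = 'seamless';
const REFINER_SEAMLESS = 'refiner_seamless';

type Node = { id: string; type: string };
const nodes: Record<string, Node> = {};

// Illustrative guard: ids double as keys into graph.nodes, so a collision
// would silently overwrite a node.
function addNode(node: Node): void {
  if (node.id in nodes) {
    throw new Error(`duplicate node id: ${node.id}`);
  }
  nodes[node.id] = node;
}

addNode({ id: SEAMLESS, type: 'seamless' });
addNode({ id: REFINER_SEAMLESS, type: 'seamless' }); // ok: distinct id
```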

View File

@ -0,0 +1,81 @@
import { skipToken } from '@reduxjs/toolkit/dist/query';
import { t } from 'i18next';
import { useCallback, useState } from 'react';
import { useAppToaster } from '../../../app/components/Toaster';
import { useAppDispatch } from '../../../app/store/storeHooks';
import {
useGetImageDTOQuery,
useGetImageMetadataQuery,
} from '../../../services/api/endpoints/images';
import { setInitialCanvasImage } from '../../canvas/store/canvasSlice';
import { setActiveTab } from '../../ui/store/uiSlice';
import { initialImageSelected } from '../store/actions';
import { useRecallParameters } from './useRecallParameters';
type SelectedImage = {
imageName: string;
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
export const usePreselectedImage = () => {
const dispatch = useAppDispatch();
const [imageNameForDto, setImageNameForDto] = useState<string | undefined>();
const [imageNameForMetadata, setImageNameForMetadata] = useState<
string | undefined
>();
const { recallAllParameters } = useRecallParameters();
const toaster = useAppToaster();
const { currentData: selectedImageDto } = useGetImageDTOQuery(
imageNameForDto ?? skipToken
);
const { currentData: selectedImageMetadata } = useGetImageMetadataQuery(
imageNameForMetadata ?? skipToken
);
const handlePreselectedImage = useCallback(
(selectedImage?: SelectedImage) => {
if (!selectedImage) {
return;
}
if (selectedImage.action === 'sendToCanvas') {
setImageNameForDto(selectedImage?.imageName);
if (selectedImageDto) {
dispatch(setInitialCanvasImage(selectedImageDto));
dispatch(setActiveTab('unifiedCanvas'));
toaster({
title: t('toast.sentToUnifiedCanvas'),
status: 'info',
duration: 2500,
isClosable: true,
});
}
}
if (selectedImage.action === 'sendToImg2Img') {
setImageNameForDto(selectedImage?.imageName);
if (selectedImageDto) {
dispatch(initialImageSelected(selectedImageDto));
}
}
if (selectedImage.action === 'useAllParameters') {
setImageNameForMetadata(selectedImage?.imageName);
if (selectedImageMetadata) {
recallAllParameters(selectedImageMetadata.metadata);
}
}
},
[
dispatch,
selectedImageDto,
selectedImageMetadata,
recallAllParameters,
toaster,
]
);
return { handlePreselectedImage };
};
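
Because both queries start skipped, the first call to handlePreselectedImage only records the image name; the action completes on a later render, once RTK Query delivers the data and the useCallback identity changes. A hypothetical consumer (component name, prop plumbing, and import path are all illustrative, not from this PR):

```tsx
import { useEffect } from 'react';
// Path assumed; adjust to wherever the hook actually lives.
import { usePreselectedImage } from 'features/parameters/hooks/usePreselectedImage';

type Props = {
  selectedImage?: {
    imageName: string;
    action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
  };
};

export const PreselectedImageHandler = ({ selectedImage }: Props) => {
  const { handlePreselectedImage } = usePreselectedImage();

  // handlePreselectedImage is recreated when the query data lands (it is in
  // the useCallback deps), so this effect re-runs and finishes the action.
  useEffect(() => {
    handlePreselectedImage(selectedImage);
  }, [selectedImage, handlePreselectedImage]);

  return null;
};
```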

View File

@ -2,6 +2,7 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import { memo } from 'react';
import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
@ -17,6 +18,7 @@ const SDXLImageToImageTabParameters = () => {
<ParamLoraCollapse />
<ParamDynamicPromptsCollapse />
<ParamNoiseCollapse />
<ParamSeamlessCollapse />
</>
);
};

View File

@ -2,6 +2,7 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters';
import { memo } from 'react';
import ParamSDXLPromptArea from './ParamSDXLPromptArea';
@ -17,6 +18,7 @@ const SDXLTextToImageTabParameters = () => {
<ParamLoraCollapse />
<ParamDynamicPromptsCollapse />
<ParamNoiseCollapse />
<ParamSeamlessCollapse />
</>
);
};

View File

@ -5,6 +5,7 @@ import ParamMaskAdjustmentCollapse from 'features/parameters/components/Paramete
import ParamCanvasCoherencePassCollapse from 'features/parameters/components/Parameters/Canvas/SeamPainting/ParamCanvasCoherencePassCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
import SDXLUnifiedCanvasTabCoreParameters from './SDXLUnifiedCanvasTabCoreParameters';
@ -22,6 +23,7 @@ export default function SDXLUnifiedCanvasTabParameters() {
<ParamMaskAdjustmentCollapse />
<ParamInfillAndScalingCollapse />
<ParamCanvasCoherencePassCollapse />
<ParamSeamlessCollapse />
</>
);
}

View File

@ -9,7 +9,6 @@ export const initialConfigState: AppConfig = {
disabledFeatures: ['lightbox', 'faceRestore', 'batches'],
disabledSDFeatures: [
'variation',
'seamless',
'symmetry',
'hires',
'perlinNoise',

View File

@ -6,6 +6,7 @@ import ParamMaskAdjustmentCollapse from 'features/parameters/components/Paramete
import ParamCanvasCoherencePassCollapse from 'features/parameters/components/Parameters/Canvas/SeamPainting/ParamCanvasCoherencePassCollapse';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
import ParamPromptArea from 'features/parameters/components/Parameters/Prompt/ParamPromptArea';
import ParamSeamlessCollapse from 'features/parameters/components/Parameters/Seamless/ParamSeamlessCollapse';
import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse';
import { memo } from 'react';
import UnifiedCanvasCoreParameters from './UnifiedCanvasCoreParameters';
@ -22,6 +23,7 @@ const UnifiedCanvasParameters = () => {
<ParamMaskAdjustmentCollapse />
<ParamInfillAndScalingCollapse />
<ParamCanvasCoherencePassCollapse />
<ParamSeamlessCollapse />
<ParamAdvancedCollapse />
</>
);

View File

@ -111,6 +111,7 @@ export type ImageBlurInvocation = s['ImageBlurInvocation'];
export type ImageScaleInvocation = s['ImageScaleInvocation'];
export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation'];
export type InfillTileInvocation = s['InfillTileInvocation'];
export type CreateDenoiseMaskInvocation = s['CreateDenoiseMaskInvocation'];
export type RandomIntInvocation = s['RandomIntInvocation'];
export type CompelInvocation = s['CompelInvocation'];
export type DynamicPromptInvocation = s['DynamicPromptInvocation'];
@ -129,6 +130,7 @@ export type ESRGANInvocation = s['ESRGANInvocation'];
export type DivideInvocation = s['DivideInvocation'];
export type ImageNSFWBlurInvocation = s['ImageNSFWBlurInvocation'];
export type ImageWatermarkInvocation = s['ImageWatermarkInvocation'];
export type SeamlessModeInvocation = s['SeamlessModeInvocation'];
// ControlNet Nodes
export type ControlNetInvocation = s['ControlNetInvocation'];
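
These re-exported invocation types exist so the graph builders can cast untyped graph.nodes entries before mutating them, as seen with CreateDenoiseMaskInvocation in the canvas builders above. A minimal sketch of that pattern using a simplified stand-in type (the real one comes from the generated OpenAPI schema, and the image name here is hypothetical):

```ts
// Simplified stand-in; the real type is s['CreateDenoiseMaskInvocation'].
type CreateDenoiseMaskInvocation = {
  id: string;
  type: 'create_denoise_mask';
  image?: { image_name: string };
  fp32?: boolean;
};

const nodes: Record<
  string,
  { id: string; type: string } & Record<string, unknown>
> = {
  inpaint_create_mask: {
    id: 'inpaint_create_mask',
    type: 'create_denoise_mask',
  },
};

// Cast-then-spread, the same pattern the canvas builders use to set a
// node's image after the graph skeleton is built.
nodes['inpaint_create_mask'] = {
  ...(nodes['inpaint_create_mask'] as CreateDenoiseMaskInvocation),
  image: { image_name: 'example-image.png' },
};
```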