mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
feat: Inpaint & Outpaint Improvements (#4408)
## What type of PR is this? (check all applicable)

- [x] Feature
- [x] Optimization

## Have you discussed this change with the InvokeAI team?

- [x] Yes

## Description

# Coherence Mode

A new parameter called Coherence Mode has been added to the Coherence Pass settings. This parameter controls what kind of Coherence Pass is done after Inpainting and Outpainting.

- Unmasked: This performs a complete unmasked image to image pass on the entire generation.
- Mask: This performs a masked image to image pass using your input mask as the coherence mask.
- Mask Edge [DEFAULT]: This performs a masked image to image pass on the edges of your mask to try and clear out the seams.

# Why The Coherence Masked Modes?

One of the issues with the unmasked coherence pass arises when the diffusion process tries to align detailed or organic objects. Because Image to Image tends to change the image a little bit even at lower strengths, this ends up with the paste-back process being slightly misaligned. By providing the mask to the Coherence Pass, we can try to eliminate this in those cases. While it will be impossible to address this for every image out there, having these options will allow the user to automate a lot of this. For everything else there's manual paint-over with inpaint.

# Graph Improvements

The graphs have now been refined quite a bit. We no longer do manual blurring of the masks for outpainting. This is no longer needed because we now dilate the mask depending on the blur size while pasting back. As a result we got rid of quite a few nodes that were handling this in the older graph.

The graphs are also a lot cleaner now because we tackle Scaled Dimensions & Coherence Mode completely independently. Inpainting results seem very promising, especially with the Mask Edge mode.

---

# New Infill Methods [Experimental]

We are currently trying out various new infill methods to see which ones might perform best in outpainting. We may keep all of them or keep none. This will be decided as we test more.

## LaMa Infill

- Re-enabled LaMa infill in the UI.
- We are trying to get this to work without a memory overhead.

In order to use LaMa, you need to manually download and place the LaMa JIT model in `models/core/misc/lama/lama.pt`. You can download the JIT model from Sanster [here](https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt) and rename it to `lama.pt`, or you can use the script in the original LaMa repo to convert the base model to a JIT model yourself (a rough setup sketch follows at the end of this description).

## CV2 Infill

- Added a new infilling method using CV2's Inpaint.

## Patchmatch Rescaling

The Patchmatch infill input image is now downscaled before being infilled. Patchmatch can be really slow at large resolutions, and this is a pretty decent way to get around that. Additionally, downscaling might also provide a better patch match by avoiding larger areas being infilled with repeating patches. But that's just the theory. Still testing it out.

## [optional] Are there any post deployment tasks we need to perform?

- If we decide to keep LaMa infill, then we will need to host the model and update the installer to download it as a core model.
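The following is a minimal sketch (not part of this PR) of the manual LaMa setup described above: it fetches the Sanster JIT model and places it at `models/core/misc/lama/lama.pt`. The `INVOKEAI_ROOT` environment variable and the use of `urllib` here are assumptions for illustration; the installer change in this PR uses its own download helper instead.

```python
# Sketch: manually fetch the LaMa JIT model into the path the backend expects.
# Assumes INVOKEAI_ROOT points at your InvokeAI data directory; adjust as needed.
import os
import urllib.request
from pathlib import Path

LAMA_URL = "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt"

root = Path(os.environ.get("INVOKEAI_ROOT", ".")).expanduser()
dest = root / "models" / "core" / "misc" / "lama" / "lama.pt"

dest.parent.mkdir(parents=True, exist_ok=True)
if not dest.exists():
    # Download and rename in one step (big-lama.pt -> lama.pt).
    urllib.request.urlretrieve(LAMA_URL, str(dest))
print(f"LaMa JIT model available at: {dest}")
```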
This commit is contained in: commit 6b850d506a
@@ -1,19 +1,19 @@
import typing
from enum import Enum
from pathlib import Path

from fastapi import Body
from fastapi.routing import APIRouter
from pathlib import Path
from pydantic import BaseModel, Field

from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.patchmatch import PatchMatch
from invokeai.backend.image_util.safety_checker import SafetyChecker
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.app.invocations.upscale import ESRGAN_MODELS

from invokeai.backend.util.logging import logging
from invokeai.version import __version__

from ..dependencies import ApiDependencies
from invokeai.backend.util.logging import logging


class LogLevel(int, Enum):

@@ -55,7 +55,7 @@ async def get_version() -> AppVersion:

@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
async def get_config() -> AppConfig:
    infill_methods = ["tile", "lama"]
    infill_methods = ["tile", "lama", "cv2"]
    if PatchMatch.patchmatch_available():
        infill_methods.append("patchmatch")

@@ -563,7 +563,7 @@ class MaskEdgeInvocation(BaseInvocation):
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        mask = context.services.images.get_pil_image(self.image.image_name)
        mask = context.services.images.get_pil_image(self.image.image_name).convert("L")

        npimg = numpy.asarray(mask, dtype=numpy.uint8)
        npgradient = numpy.uint8(255 * (1.0 - numpy.floor(numpy.abs(0.5 - numpy.float32(npimg) / 255.0) * 2.0)))

@@ -700,8 +700,13 @@ class ColorCorrectInvocation(BaseInvocation):
        # Blur the mask out (into init image) by specified amount
        if self.mask_blur_radius > 0:
            nm = numpy.asarray(pil_init_mask, dtype=numpy.uint8)
            inverted_nm = 255 - nm
            dilation_size = int(round(self.mask_blur_radius) + 20)
            dilating_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilation_size, dilation_size))
            inverted_dilated_nm = cv2.dilate(inverted_nm, dilating_kernel)
            dilated_nm = 255 - inverted_dilated_nm
            nmd = cv2.erode(
                nm,
                dilated_nm,
                kernel=numpy.ones((3, 3), dtype=numpy.uint8),
                iterations=int(self.mask_blur_radius / 2),
            )
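As a side note, here is a minimal standalone sketch of the dilation step used in the color-correct paste-back above, assuming a single-channel uint8 mask where 255 marks the preserved region; the function name is illustrative.

```python
import cv2
import numpy as np


def dilate_mask_for_blur(mask: np.ndarray, blur_radius: float) -> np.ndarray:
    """Grow the masked (0-valued) region outward before blurring,
    mirroring the dilation heuristic in ColorCorrectInvocation above."""
    inverted = 255 - mask
    size = int(round(blur_radius) + 20)  # same padding heuristic as the invocation
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
    return 255 - cv2.dilate(inverted, kernel)
```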
@@ -8,19 +8,17 @@ from PIL import Image, ImageOps

from invokeai.app.invocations.primitives import ColorField, ImageField, ImageOutput
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint
from invokeai.backend.image_util.lama import LaMA
from invokeai.backend.image_util.patchmatch import PatchMatch

from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES


def infill_methods() -> list[str]:
    methods = [
        "tile",
        "solid",
        "lama",
    ]
    methods = ["tile", "solid", "lama", "cv2"]
    if PatchMatch.patchmatch_available():
        methods.insert(0, "patchmatch")
    return methods

@@ -49,6 +47,10 @@ def infill_patchmatch(im: Image.Image) -> Image.Image:
    return im_patched


def infill_cv2(im: Image.Image) -> Image.Image:
    return cv2_inpaint(im)


def get_tile_images(image: np.ndarray, width=8, height=8):
    _nrows, _ncols, depth = image.shape
    _strides = image.strides

@@ -194,15 +196,35 @@ class InfillPatchMatchInvocation(BaseInvocation):
    """Infills transparent areas of an image using the PatchMatch algorithm"""

    image: ImageField = InputField(description="The image to infill")
    downscale: float = InputField(default=2.0, gt=0, description="Run patchmatch on downscaled image to speedup infill")
    resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(self.image.image_name)
        image = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")

        resample_mode = PIL_RESAMPLING_MAP[self.resample_mode]

        infill_image = image.copy()
        width = int(image.width / self.downscale)
        height = int(image.height / self.downscale)
        infill_image = infill_image.resize(
            (width, height),
            resample=resample_mode,
        )

        if PatchMatch.patchmatch_available():
            infilled = infill_patchmatch(image.copy())
            infilled = infill_patchmatch(infill_image)
        else:
            raise ValueError("PatchMatch is not available on this system")

        infilled = infilled.resize(
            (image.width, image.height),
            resample=resample_mode,
        )

        infilled.paste(image, (0, 0), mask=image.split()[-1])
        # image.paste(infilled, (0, 0), mask=image.split()[-1])

        image_dto = context.services.images.create(
            image=infilled,
            image_origin=ResourceOrigin.INTERNAL,

@@ -245,3 +267,30 @@ class LaMaInfillInvocation(BaseInvocation):
            width=image_dto.width,
            height=image_dto.height,
        )


@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint")
class CV2InfillInvocation(BaseInvocation):
    """Infills transparent areas of an image using OpenCV Inpainting"""

    image: ImageField = InputField(description="The image to infill")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(self.image.image_name)

        infilled = infill_cv2(image.copy())

        image_dto = context.services.images.create(
            image=infilled,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
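To make the new downscaling behaviour of `InfillPatchMatchInvocation` above easier to follow, here is a minimal standalone sketch of the same downscale, infill, upscale, and paste-back flow. The function and parameter names are illustrative, and the infill callable is passed in rather than assuming a specific import; it also assumes Pillow 9+ for `Image.Resampling`.

```python
from typing import Callable

from PIL import Image


def infill_downscaled(
    image: Image.Image,
    infill_fn: Callable[[Image.Image], Image.Image],  # e.g. the module's infill_patchmatch
    downscale: float = 2.0,
    resample=Image.Resampling.BICUBIC,
) -> Image.Image:
    """Run an infill function at reduced resolution, restore the original size,
    then paste the original opaque pixels back on top (as the invocation does)."""
    image = image.convert("RGBA")
    width = int(image.width / downscale)
    height = int(image.height / downscale)
    infilled = infill_fn(image.resize((width, height), resample=resample))
    infilled = infilled.resize((image.width, image.height), resample=resample)
    infilled.paste(image, (0, 0), mask=image.split()[-1])  # alpha channel as paste mask
    return infilled
```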
invokeai/backend/image_util/cv2_inpaint.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import cv2
import numpy as np
from PIL import Image


def cv2_inpaint(image: Image.Image) -> Image.Image:
    # Prepare Image
    image_array = np.array(image.convert("RGB"))
    image_cv = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)

    # Prepare Mask From Alpha Channel
    mask = image.split()[3].convert("RGB")
    mask_array = np.array(mask)
    mask_cv = cv2.cvtColor(mask_array, cv2.COLOR_BGR2GRAY)
    mask_inv = cv2.bitwise_not(mask_cv)

    # Inpaint Image
    inpainted_result = cv2.inpaint(image_cv, mask_inv, 3, cv2.INPAINT_TELEA)
    inpainted_image = Image.fromarray(cv2.cvtColor(inpainted_result, cv2.COLOR_BGR2RGB))
    return inpainted_image
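A short usage sketch for the new helper, assuming an RGBA image whose transparent pixels mark the region to infill; the file names are illustrative.

```python
from PIL import Image

from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint

# Transparent (alpha = 0) areas of the RGBA input are treated as the region
# to inpaint, since the mask is built from the inverted alpha channel above.
image = Image.open("outpaint_canvas.png").convert("RGBA")
filled = cv2_inpaint(image)
filled.save("outpaint_canvas_filled.png")
```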
@@ -5,6 +5,7 @@ import numpy as np
import torch
from PIL import Image

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import get_invokeai_config
from invokeai.backend.util.devices import choose_torch_device

@@ -19,7 +20,7 @@ def norm_img(np_img):

def load_jit_model(url_or_path, device):
    model_path = url_or_path
    print(f"Loading model from: {model_path}")
    logger.info(f"Loading model from: {model_path}")
    model = torch.jit.load(model_path, map_location="cpu").to(device)
    model.eval()
    return model

@@ -52,5 +53,6 @@ class LaMA:

        del model
        gc.collect()
        torch.cuda.empty_cache()

        return infilled_image
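A brief usage sketch for the loader above. This is an assumption-laden illustration: it presumes `load_jit_model` is importable from this module and that `config.models_path` resolves to the models directory used by the installer change below.

```python
import torch

from invokeai.app.services.config import get_invokeai_config
from invokeai.backend.image_util.lama import load_jit_model
from invokeai.backend.util.devices import choose_torch_device

config = get_invokeai_config()
device = choose_torch_device()

# Load the manually (or installer-) placed JIT model onto the chosen device.
model = load_jit_model(config.models_path / "core/misc/lama/lama.pt", device)
assert isinstance(model, torch.jit.ScriptModule)
```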
@@ -290,9 +290,20 @@ def download_realesrgan():
        download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])


# ---------------------------------------------
def download_lama():
    logger.info("Installing lama infill model")
    download_with_progress_bar(
        "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
        config.models_path / "core/misc/lama/lama.pt",
        "lama infill model",
    )


# ---------------------------------------------
def download_support_models():
    download_realesrgan()
    download_lama()
    download_conversion_models()
@@ -511,6 +511,7 @@
    "maskBlur": "Blur",
    "maskBlurMethod": "Blur Method",
    "coherencePassHeader": "Coherence Pass",
    "coherenceMode": "Mode",
    "coherenceSteps": "Steps",
    "coherenceStrength": "Strength",
    "seamLowThreshold": "Low",

@@ -520,6 +521,7 @@
    "scaledHeight": "Scaled H",
    "infillMethod": "Infill Method",
    "tileSize": "Tile Size",
    "patchmatchDownScaleSize": "Downscale",
    "boundingBoxHeader": "Bounding Box",
    "seamCorrectionHeader": "Seam Correction",
    "infillScalingHeader": "Infill and Scaling",
@@ -118,7 +118,11 @@ const IAICanvasToolChooserOptions = () => {
  useHotkeys(
    ['BracketLeft'],
    () => {
      dispatch(setBrushSize(Math.max(brushSize - 5, 5)));
      if (brushSize - 5 <= 5) {
        dispatch(setBrushSize(Math.max(brushSize - 1, 1)));
      } else {
        dispatch(setBrushSize(Math.max(brushSize - 5, 1)));
      }
    },
    {
      enabled: () => !isStaging,
@@ -10,7 +10,8 @@ import {
  CANVAS_OUTPUT,
  INPAINT_IMAGE_RESIZE_UP,
  LATENTS_TO_IMAGE,
  MASK_BLUR,
  MASK_COMBINE,
  MASK_RESIZE_UP,
  METADATA_ACCUMULATOR,
  SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
  SDXL_CANVAS_INPAINT_GRAPH,

@@ -46,6 +47,8 @@ export const addSDXLRefinerToGraph = (
  const { seamlessXAxis, seamlessYAxis, vaePrecision } = state.generation;
  const { boundingBoxScaleMethod } = state.canvas;

  const fp32 = vaePrecision === 'fp32';

  const isUsingScaledDimensions = ['auto', 'manual'].includes(
    boundingBoxScaleMethod
  );

@@ -231,7 +234,7 @@ export const addSDXLRefinerToGraph = (
    type: 'create_denoise_mask',
    id: SDXL_REFINER_INPAINT_CREATE_MASK,
    is_intermediate: true,
    fp32: vaePrecision === 'fp32' ? true : false,
    fp32,
  };

  if (isUsingScaledDimensions) {

@@ -257,7 +260,7 @@ export const addSDXLRefinerToGraph = (
  graph.edges.push(
    {
      source: {
        node_id: MASK_BLUR,
        node_id: isUsingScaledDimensions ? MASK_RESIZE_UP : MASK_COMBINE,
        field: 'image',
      },
      destination: {
@@ -2,6 +2,7 @@ import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { MetadataAccumulatorInvocation } from 'services/api/types';
import {
  CANVAS_COHERENCE_INPAINT_CREATE_MASK,
  CANVAS_IMAGE_TO_IMAGE_GRAPH,
  CANVAS_INPAINT_GRAPH,
  CANVAS_OUTPAINT_GRAPH,

@@ -31,7 +32,7 @@ export const addVAEToGraph = (
  graph: NonNullableGraph,
  modelLoaderNodeId: string = MAIN_MODEL_LOADER
): void => {
  const { vae } = state.generation;
  const { vae, canvasCoherenceMode } = state.generation;
  const { boundingBoxScaleMethod } = state.canvas;
  const { shouldUseSDXLRefiner } = state.sdxl;

@@ -146,6 +147,20 @@ export const addVAEToGraph = (
        },
      }
    );

    // Handle Coherence Mode
    if (canvasCoherenceMode !== 'unmasked') {
      graph.edges.push({
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
        },
        destination: {
          node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
          field: 'vae',
        },
      });
    }
  }

  if (shouldUseSDXLRefiner) {
@ -59,6 +59,8 @@ export const buildCanvasImageToImageGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
@ -245,7 +247,7 @@ export const buildCanvasImageToImageGraph = (
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
@ -292,7 +294,7 @@ export const buildCanvasImageToImageGraph = (
|
||||
type: 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
|
||||
|
@ -6,6 +6,7 @@ import {
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
MaskEdgeInvocation,
|
||||
NoiseInvocation,
|
||||
RandomIntInvocation,
|
||||
RangeOfSizeInvocation,
|
||||
@ -18,6 +19,8 @@ import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
CANVAS_COHERENCE_MASK_EDGE,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_INPAINT_GRAPH,
|
||||
@ -67,6 +70,7 @@ export const buildCanvasInpaintGraph = (
|
||||
shouldUseCpuNoise,
|
||||
maskBlur,
|
||||
maskBlurMethod,
|
||||
canvasCoherenceMode,
|
||||
canvasCoherenceSteps,
|
||||
canvasCoherenceStrength,
|
||||
clipSkip,
|
||||
@ -89,6 +93,12 @@ export const buildCanvasInpaintGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
let modelLoaderNodeId = MAIN_MODEL_LOADER;
|
||||
|
||||
const use_cpu = shouldUseNoiseSettings
|
||||
@ -133,13 +143,7 @@ export const buildCanvasInpaintGraph = (
|
||||
type: 'i2l',
|
||||
id: INPAINT_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[NOISE]: {
|
||||
type: 'noise',
|
||||
@ -147,6 +151,12 @@ export const buildCanvasInpaintGraph = (
|
||||
use_cpu,
|
||||
is_intermediate: true,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32,
|
||||
},
|
||||
[DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: DENOISE_LATENTS,
|
||||
@ -171,7 +181,7 @@ export const buildCanvasInpaintGraph = (
|
||||
},
|
||||
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: DENOISE_LATENTS,
|
||||
id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
is_intermediate: true,
|
||||
steps: canvasCoherenceSteps,
|
||||
cfg_scale: cfg_scale,
|
||||
@ -183,7 +193,7 @@ export const buildCanvasInpaintGraph = (
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: 'color_correct',
|
||||
@ -418,7 +428,7 @@ export const buildCanvasInpaintGraph = (
|
||||
};
|
||||
|
||||
// Handle Scale Before Processing
|
||||
if (['auto', 'manual'].includes(boundingBoxScaleMethod)) {
|
||||
if (isUsingScaledDimensions) {
|
||||
const scaledWidth: number = scaledBoundingBoxDimensions.width;
|
||||
const scaledHeight: number = scaledBoundingBoxDimensions.height;
|
||||
|
||||
@ -581,6 +591,116 @@ export const buildCanvasInpaintGraph = (
|
||||
);
|
||||
}
|
||||
|
||||
// Handle Coherence Mode
|
||||
if (canvasCoherenceMode !== 'unmasked') {
|
||||
// Create Mask If Coherence Mode Is Not Full
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
type: 'create_denoise_mask',
|
||||
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32,
|
||||
};
|
||||
|
||||
// Handle Image Input For Mask Creation
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: INPAINT_IMAGE_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK
|
||||
] as CreateDenoiseMaskInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
}
|
||||
|
||||
// Create Mask If Coherence Mode Is Mask
|
||||
if (canvasCoherenceMode === 'mask') {
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK
|
||||
] as CreateDenoiseMaskInvocation),
|
||||
mask: canvasMaskImage,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Create Mask Edge If Coherence Mode Is Edge
|
||||
if (canvasCoherenceMode === 'edge') {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
type: 'mask_edge',
|
||||
id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
is_intermediate: true,
|
||||
edge_blur: maskBlur,
|
||||
edge_size: maskBlur * 2,
|
||||
low_threshold: 100,
|
||||
high_threshold: 200,
|
||||
};
|
||||
|
||||
// Handle Scaled Dimensions For Mask Edge
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
...(graph.nodes[CANVAS_COHERENCE_MASK_EDGE] as MaskEdgeInvocation),
|
||||
image: canvasMaskImage,
|
||||
};
|
||||
}
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Plug Denoise Mask To Coherence Denoise Latents
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Handle Seed
|
||||
if (shouldRandomizeSeed) {
|
||||
// Random int node to generate the starting seed
|
||||
|
@ -2,7 +2,6 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import {
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
InfillPatchMatchInvocation,
|
||||
@ -19,6 +18,8 @@ import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
CANVAS_COHERENCE_MASK_EDGE,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPAINT_GRAPH,
|
||||
@ -34,7 +35,6 @@ import {
|
||||
ITERATE,
|
||||
LATENTS_TO_IMAGE,
|
||||
MAIN_MODEL_LOADER,
|
||||
MASK_BLUR,
|
||||
MASK_COMBINE,
|
||||
MASK_FROM_ALPHA,
|
||||
MASK_RESIZE_DOWN,
|
||||
@ -71,10 +71,11 @@ export const buildCanvasOutpaintGraph = (
|
||||
shouldUseNoiseSettings,
|
||||
shouldUseCpuNoise,
|
||||
maskBlur,
|
||||
maskBlurMethod,
|
||||
canvasCoherenceMode,
|
||||
canvasCoherenceSteps,
|
||||
canvasCoherenceStrength,
|
||||
tileSize,
|
||||
infillTileSize,
|
||||
infillPatchmatchDownscaleSize,
|
||||
infillMethod,
|
||||
clipSkip,
|
||||
seamlessXAxis,
|
||||
@ -96,6 +97,12 @@ export const buildCanvasOutpaintGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
let modelLoaderNodeId = MAIN_MODEL_LOADER;
|
||||
|
||||
const use_cpu = shouldUseNoiseSettings
|
||||
@ -141,18 +148,11 @@ export const buildCanvasOutpaintGraph = (
|
||||
is_intermediate: true,
|
||||
mask2: canvasMaskImage,
|
||||
},
|
||||
[MASK_BLUR]: {
|
||||
type: 'img_blur',
|
||||
id: MASK_BLUR,
|
||||
is_intermediate: true,
|
||||
radius: maskBlur,
|
||||
blur_type: maskBlurMethod,
|
||||
},
|
||||
[INPAINT_IMAGE]: {
|
||||
type: 'i2l',
|
||||
id: INPAINT_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[NOISE]: {
|
||||
type: 'noise',
|
||||
@ -164,7 +164,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -202,7 +202,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: 'color_correct',
|
||||
@ -333,7 +333,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
node_id: isUsingScaledDimensions ? MASK_RESIZE_UP : MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
@ -443,6 +443,16 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Decode the result from Inpaint
|
||||
{
|
||||
source: {
|
||||
@ -463,6 +473,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
type: 'infill_patchmatch',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
downscale: infillPatchmatchDownscaleSize,
|
||||
};
|
||||
}
|
||||
|
||||
@ -474,17 +485,25 @@ export const buildCanvasOutpaintGraph = (
|
||||
};
|
||||
}
|
||||
|
||||
if (infillMethod === 'cv2') {
|
||||
graph.nodes[INPAINT_INFILL] = {
|
||||
type: 'infill_cv2',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
};
|
||||
}
|
||||
|
||||
if (infillMethod === 'tile') {
|
||||
graph.nodes[INPAINT_INFILL] = {
|
||||
type: 'infill_tile',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
tile_size: tileSize,
|
||||
tile_size: infillTileSize,
|
||||
};
|
||||
}
|
||||
|
||||
// Handle Scale Before Processing
|
||||
if (['auto', 'manual'].includes(boundingBoxScaleMethod)) {
|
||||
if (isUsingScaledDimensions) {
|
||||
const scaledWidth: number = scaledBoundingBoxDimensions.width;
|
||||
const scaledHeight: number = scaledBoundingBoxDimensions.height;
|
||||
|
||||
@ -546,16 +565,6 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Take combined mask and resize and then blur
|
||||
{
|
||||
source: {
|
||||
@ -567,16 +576,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
|
||||
// Resize Results Down
|
||||
{
|
||||
source: {
|
||||
@ -658,32 +658,8 @@ export const buildCanvasOutpaintGraph = (
|
||||
...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
graph.nodes[MASK_BLUR] = {
|
||||
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
// Take combined mask and plug it to blur
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
@ -707,7 +683,7 @@ export const buildCanvasOutpaintGraph = (
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
@ -718,6 +694,115 @@ export const buildCanvasOutpaintGraph = (
|
||||
);
|
||||
}
|
||||
|
||||
// Handle Coherence Mode
|
||||
if (canvasCoherenceMode !== 'unmasked') {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
type: 'create_denoise_mask',
|
||||
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32,
|
||||
};
|
||||
|
||||
// Handle Image Input For Mask Creation
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
|
||||
// Create Mask If Coherence Mode Is Mask
|
||||
if (canvasCoherenceMode === 'mask') {
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (canvasCoherenceMode === 'edge') {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
type: 'mask_edge',
|
||||
id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
is_intermediate: true,
|
||||
edge_blur: maskBlur,
|
||||
edge_size: maskBlur * 2,
|
||||
low_threshold: 100,
|
||||
high_threshold: 200,
|
||||
};
|
||||
|
||||
// Handle Scaled Dimensions For Mask Edge
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Plug Denoise Mask To Coherence Denoise Latents
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Handle Seed
|
||||
if (shouldRandomizeSeed) {
|
||||
// Random int node to generate the starting seed
|
||||
|
@ -67,6 +67,8 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
@ -133,7 +135,7 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
type: 'i2l',
|
||||
id: IMAGE_TO_LATENTS,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -258,7 +260,7 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
@ -305,7 +307,7 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
type: 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
|
||||
|
@ -6,6 +6,7 @@ import {
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
MaskEdgeInvocation,
|
||||
NoiseInvocation,
|
||||
RandomIntInvocation,
|
||||
RangeOfSizeInvocation,
|
||||
@ -19,6 +20,8 @@ import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
CANVAS_COHERENCE_MASK_EDGE,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPUT,
|
||||
@ -68,6 +71,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
shouldUseCpuNoise,
|
||||
maskBlur,
|
||||
maskBlurMethod,
|
||||
canvasCoherenceMode,
|
||||
canvasCoherenceSteps,
|
||||
canvasCoherenceStrength,
|
||||
seamlessXAxis,
|
||||
@ -96,6 +100,12 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
let modelLoaderNodeId = SDXL_MODEL_LOADER;
|
||||
|
||||
const use_cpu = shouldUseNoiseSettings
|
||||
@ -137,7 +147,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
type: 'i2l',
|
||||
id: INPAINT_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[NOISE]: {
|
||||
type: 'noise',
|
||||
@ -149,7 +159,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -177,7 +187,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
},
|
||||
[CANVAS_COHERENCE_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: SDXL_DENOISE_LATENTS,
|
||||
id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
is_intermediate: true,
|
||||
steps: canvasCoherenceSteps,
|
||||
cfg_scale: cfg_scale,
|
||||
@ -189,7 +199,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: 'color_correct',
|
||||
@ -433,7 +443,7 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
};
|
||||
|
||||
// Handle Scale Before Processing
|
||||
if (['auto', 'manual'].includes(boundingBoxScaleMethod)) {
|
||||
if (isUsingScaledDimensions) {
|
||||
const scaledWidth: number = scaledBoundingBoxDimensions.width;
|
||||
const scaledHeight: number = scaledBoundingBoxDimensions.height;
|
||||
|
||||
@ -596,6 +606,116 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
);
|
||||
}
|
||||
|
||||
// Handle Coherence Mode
|
||||
if (canvasCoherenceMode !== 'unmasked') {
|
||||
// Create Mask If Coherence Mode Is Not Full
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
type: 'create_denoise_mask',
|
||||
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32,
|
||||
};
|
||||
|
||||
// Handle Image Input For Mask Creation
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: INPAINT_IMAGE_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK
|
||||
] as CreateDenoiseMaskInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
}
|
||||
|
||||
// Create Mask If Coherence Mode Is Mask
|
||||
if (canvasCoherenceMode === 'mask') {
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK
|
||||
] as CreateDenoiseMaskInvocation),
|
||||
mask: canvasMaskImage,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Create Mask Edge If Coherence Mode Is Edge
|
||||
if (canvasCoherenceMode === 'edge') {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
type: 'mask_edge',
|
||||
id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
is_intermediate: true,
|
||||
edge_blur: maskBlur,
|
||||
edge_size: maskBlur * 2,
|
||||
low_threshold: 100,
|
||||
high_threshold: 200,
|
||||
};
|
||||
|
||||
// Handle Scaled Dimensions For Mask Edge
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
...(graph.nodes[CANVAS_COHERENCE_MASK_EDGE] as MaskEdgeInvocation),
|
||||
image: canvasMaskImage,
|
||||
};
|
||||
}
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Plug Denoise Mask To Coherence Denoise Latents
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Handle Seed
|
||||
if (shouldRandomizeSeed) {
|
||||
// Random int node to generate the starting seed
|
||||
|
@ -2,7 +2,6 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import {
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
InfillPatchMatchInvocation,
|
||||
@ -20,6 +19,8 @@ import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
CANVAS_COHERENCE_MASK_EDGE,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPUT,
|
||||
@ -31,7 +32,6 @@ import {
|
||||
INPAINT_INFILL_RESIZE_DOWN,
|
||||
ITERATE,
|
||||
LATENTS_TO_IMAGE,
|
||||
MASK_BLUR,
|
||||
MASK_COMBINE,
|
||||
MASK_FROM_ALPHA,
|
||||
MASK_RESIZE_DOWN,
|
||||
@ -72,10 +72,11 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
shouldUseNoiseSettings,
|
||||
shouldUseCpuNoise,
|
||||
maskBlur,
|
||||
maskBlurMethod,
|
||||
canvasCoherenceMode,
|
||||
canvasCoherenceSteps,
|
||||
canvasCoherenceStrength,
|
||||
tileSize,
|
||||
infillTileSize,
|
||||
infillPatchmatchDownscaleSize,
|
||||
infillMethod,
|
||||
seamlessXAxis,
|
||||
seamlessYAxis,
|
||||
@ -103,6 +104,12 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
let modelLoaderNodeId = SDXL_MODEL_LOADER;
|
||||
|
||||
const use_cpu = shouldUseNoiseSettings
|
||||
@ -145,18 +152,11 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
is_intermediate: true,
|
||||
mask2: canvasMaskImage,
|
||||
},
|
||||
[MASK_BLUR]: {
|
||||
type: 'img_blur',
|
||||
id: MASK_BLUR,
|
||||
is_intermediate: true,
|
||||
radius: maskBlur,
|
||||
blur_type: maskBlurMethod,
|
||||
},
|
||||
[INPAINT_IMAGE]: {
|
||||
type: 'i2l',
|
||||
id: INPAINT_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[NOISE]: {
|
||||
type: 'noise',
|
||||
@ -168,7 +168,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -208,7 +208,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: 'color_correct',
|
||||
@ -348,7 +348,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
node_id: isUsingScaledDimensions ? MASK_RESIZE_UP : MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
@ -410,7 +410,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: SDXL_MODEL_LOADER,
|
||||
node_id: modelLoaderNodeId,
|
||||
field: 'unet',
|
||||
},
|
||||
destination: {
|
||||
@ -458,6 +458,16 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Decode inpainted latents to image
|
||||
{
|
||||
source: {
|
||||
@ -473,12 +483,12 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
};
|
||||
|
||||
// Add Infill Nodes
|
||||
|
||||
if (infillMethod === 'patchmatch') {
|
||||
graph.nodes[INPAINT_INFILL] = {
|
||||
type: 'infill_patchmatch',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
downscale: infillPatchmatchDownscaleSize,
|
||||
};
|
||||
}
|
||||
|
||||
@ -490,17 +500,25 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
};
|
||||
}
|
||||
|
||||
if (infillMethod === 'cv2') {
|
||||
graph.nodes[INPAINT_INFILL] = {
|
||||
type: 'infill_cv2',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
};
|
||||
}
|
||||
|
||||
if (infillMethod === 'tile') {
|
||||
graph.nodes[INPAINT_INFILL] = {
|
||||
type: 'infill_tile',
|
||||
id: INPAINT_INFILL,
|
||||
is_intermediate: true,
|
||||
tile_size: tileSize,
|
||||
tile_size: infillTileSize,
|
||||
};
|
||||
}
|
||||
|
||||
// Handle Scale Before Processing
|
||||
if (['auto', 'manual'].includes(boundingBoxScaleMethod)) {
|
||||
if (isUsingScaledDimensions) {
|
||||
const scaledWidth: number = scaledBoundingBoxDimensions.width;
|
||||
const scaledHeight: number = scaledBoundingBoxDimensions.height;
|
||||
|
||||
@ -562,16 +580,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
|
||||
// Take combined mask and resize and then blur
|
||||
{
|
||||
source: {
|
||||
@ -583,16 +592,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
|
||||
// Resize Results Down
|
||||
{
|
||||
source: {
|
||||
@ -674,32 +674,8 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
graph.nodes[MASK_BLUR] = {
|
||||
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
// Take combined mask and plug it to blur
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
@ -723,7 +699,7 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
@ -734,7 +710,116 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
);
|
||||
}
|
||||
|
||||
// Handle seed
|
||||
// Handle Coherence Mode
|
||||
if (canvasCoherenceMode !== 'unmasked') {
|
||||
graph.nodes[CANVAS_COHERENCE_INPAINT_CREATE_MASK] = {
|
||||
type: 'create_denoise_mask',
|
||||
id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32,
|
||||
};
|
||||
|
||||
// Handle Image Input For Mask Creation
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
|
||||
// Create Mask If Coherence Mode Is Mask
|
||||
if (canvasCoherenceMode === 'mask') {
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (canvasCoherenceMode === 'edge') {
|
||||
graph.nodes[CANVAS_COHERENCE_MASK_EDGE] = {
|
||||
type: 'mask_edge',
|
||||
id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
is_intermediate: true,
|
||||
edge_blur: maskBlur,
|
||||
edge_size: maskBlur * 2,
|
||||
low_threshold: 100,
|
||||
high_threshold: 200,
|
||||
};
|
||||
|
||||
// Handle Scaled Dimensions For Mask Edge
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
} else {
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: MASK_COMBINE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_MASK_EDGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Plug Denoise Mask To Coherence Denoise Latents
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: CANVAS_COHERENCE_INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Handle Seed
|
||||
if (shouldRandomizeSeed) {
|
||||
// Random int node to generate the starting seed
|
||||
const randomIntNode: RandomIntInvocation = {
|
||||
|
@ -61,6 +61,8 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
@ -252,7 +254,7 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
@ -290,7 +292,7 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
graph.edges.push({
|
||||
|
@ -59,6 +59,8 @@ export const buildCanvasTextToImageGraph = (
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
@ -238,7 +240,7 @@ export const buildCanvasTextToImageGraph = (
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
@ -276,7 +278,7 @@ export const buildCanvasTextToImageGraph = (
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
};
|
||||
|
||||
graph.edges.push({
|
||||
|
@ -84,6 +84,8 @@ export const buildLinearImageToImageGraph = (
|
||||
throw new Error('No model found in state');
|
||||
}
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
let modelLoaderNodeId = MAIN_MODEL_LOADER;
|
||||
|
||||
const use_cpu = shouldUseNoiseSettings
|
||||
@ -122,7 +124,7 @@ export const buildLinearImageToImageGraph = (
|
||||
[LATENTS_TO_IMAGE]: {
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -140,7 +142,7 @@ export const buildLinearImageToImageGraph = (
|
||||
// image: {
|
||||
// image_name: initialImage.image_name,
|
||||
// },
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
|
@ -84,6 +84,8 @@ export const buildLinearSDXLImageToImageGraph = (
|
||||
throw new Error('No model found in state');
|
||||
}
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
// Model Loader ID
|
||||
let modelLoaderNodeId = SDXL_MODEL_LOADER;
|
||||
|
||||
@ -124,7 +126,7 @@ export const buildLinearSDXLImageToImageGraph = (
|
||||
[LATENTS_TO_IMAGE]: {
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -144,7 +146,7 @@ export const buildLinearSDXLImageToImageGraph = (
|
||||
// image: {
|
||||
// image_name: initialImage.image_name,
|
||||
// },
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
|
@ -62,6 +62,8 @@ export const buildLinearSDXLTextToImageGraph = (
|
||||
throw new Error('No model found in state');
|
||||
}
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
// Construct Style Prompt
|
||||
const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } =
|
||||
craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt);
|
||||
@ -118,7 +120,7 @@ export const buildLinearSDXLTextToImageGraph = (
|
||||
[LATENTS_TO_IMAGE]: {
|
||||
type: 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
|
@ -57,6 +57,8 @@ export const buildLinearTextToImageGraph = (
|
||||
throw new Error('No model found in state');
|
||||
}
|
||||
|
||||
const fp32 = vaePrecision === 'fp32';
|
||||
|
||||
const isUsingOnnxModel = model.model_type === 'onnx';
|
||||
|
||||
let modelLoaderNodeId = isUsingOnnxModel
|
||||
@ -139,7 +141,7 @@ export const buildLinearTextToImageGraph = (
|
||||
[LATENTS_TO_IMAGE]: {
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: LATENTS_TO_IMAGE,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
fp32,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
|
@@ -27,11 +27,15 @@ export const INPAINT_INFILL = 'inpaint_infill';
export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down';
export const INPAINT_FINAL_IMAGE = 'inpaint_final_image';
export const INPAINT_CREATE_MASK = 'inpaint_create_mask';
export const INPAINT_MASK = 'inpaint_mask';
export const CANVAS_COHERENCE_DENOISE_LATENTS =
  'canvas_coherence_denoise_latents';
export const CANVAS_COHERENCE_NOISE = 'canvas_coherence_noise';
export const CANVAS_COHERENCE_NOISE_INCREMENT =
  'canvas_coherence_noise_increment';
export const CANVAS_COHERENCE_MASK_EDGE = 'canvas_coherence_mask_edge';
export const CANVAS_COHERENCE_INPAINT_CREATE_MASK =
  'canvas_coherence_inpaint_create_mask';
export const MASK_FROM_ALPHA = 'tomask';
export const MASK_EDGE = 'mask_edge';
export const MASK_BLUR = 'mask_blur';
@@ -0,0 +1,42 @@
import type { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { IAISelectDataType } from 'common/components/IAIMantineSearchableSelect';
import IAIMantineSelect from 'common/components/IAIMantineSelect';
import { setCanvasCoherenceMode } from 'features/parameters/store/generationSlice';
import { CanvasCoherenceModeParam } from 'features/parameters/types/parameterSchemas';

import { memo } from 'react';
import { useTranslation } from 'react-i18next';

const coherenceModeSelectData: IAISelectDataType[] = [
  { label: 'Unmasked', value: 'unmasked' },
  { label: 'Mask', value: 'mask' },
  { label: 'Mask Edge', value: 'edge' },
];

const ParamCanvasCoherenceMode = () => {
  const dispatch = useAppDispatch();
  const canvasCoherenceMode = useAppSelector(
    (state: RootState) => state.generation.canvasCoherenceMode
  );
  const { t } = useTranslation();

  const handleCoherenceModeChange = (v: string | null) => {
    if (!v) {
      return;
    }

    dispatch(setCanvasCoherenceMode(v as CanvasCoherenceModeParam));
  };

  return (
    <IAIMantineSelect
      label={t('parameters.coherenceMode')}
      data={coherenceModeSelectData}
      value={canvasCoherenceMode}
      onChange={handleCoherenceModeChange}
    />
  );
};

export default memo(ParamCanvasCoherenceMode);
@@ -3,6 +3,7 @@ import IAICollapse from 'common/components/IAICollapse';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import SubParametersWrapper from '../../SubParametersWrapper';
import ParamCanvasCoherenceMode from './CoherencePass/ParamCanvasCoherenceMode';
import ParamCanvasCoherenceSteps from './CoherencePass/ParamCanvasCoherenceSteps';
import ParamCanvasCoherenceStrength from './CoherencePass/ParamCanvasCoherenceStrength';
import ParamMaskBlur from './MaskAdjustment/ParamMaskBlur';

@@ -14,15 +15,16 @@ const ParamCompositingSettingsCollapse = () => {
  return (
    <IAICollapse label={t('parameters.compositingSettingsHeader')}>
      <Flex sx={{ flexDirection: 'column', gap: 2 }}>
        <SubParametersWrapper label={t('parameters.coherencePassHeader')}>
          <ParamCanvasCoherenceMode />
          <ParamCanvasCoherenceSteps />
          <ParamCanvasCoherenceStrength />
        </SubParametersWrapper>
        <Divider />
        <SubParametersWrapper label={t('parameters.maskAdjustmentsHeader')}>
          <ParamMaskBlur />
          <ParamMaskBlurMethod />
        </SubParametersWrapper>
        <Divider />
        <SubParametersWrapper label={t('parameters.coherencePassHeader')}>
          <ParamCanvasCoherenceSteps />
          <ParamCanvasCoherenceStrength />
        </SubParametersWrapper>
      </Flex>
    </IAICollapse>
  );
@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next';
 import IAICollapse from 'common/components/IAICollapse';
 import SubParametersWrapper from '../../SubParametersWrapper';
 import ParamInfillMethod from './ParamInfillMethod';
-import ParamInfillTilesize from './ParamInfillTilesize';
+import ParamInfillOptions from './ParamInfillOptions';
 import ParamScaleBeforeProcessing from './ParamScaleBeforeProcessing';
 import ParamScaledHeight from './ParamScaledHeight';
 import ParamScaledWidth from './ParamScaledWidth';
@@ -18,7 +18,7 @@ const ParamInfillCollapse = () => {
     <Flex sx={{ gap: 2, flexDirection: 'column' }}>
       <SubParametersWrapper>
         <ParamInfillMethod />
-        <ParamInfillTilesize />
+        <ParamInfillOptions />
       </SubParametersWrapper>
       <Divider />
       <SubParametersWrapper>
@@ -27,9 +27,7 @@ const ParamInfillMethod = () => {

   const { data: appConfigData, isLoading } = useGetAppConfigQuery();

-  const infill_methods = appConfigData?.infill_methods.filter(
-    (method) => method !== 'lama'
-  );
+  const infill_methods = appConfigData?.infill_methods;

   const { t } = useTranslation();

@@ -0,0 +1,29 @@
+import { Flex } from '@chakra-ui/react';
+import { createSelector } from '@reduxjs/toolkit';
+import { useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import { generationSelector } from 'features/parameters/store/generationSelectors';
+import ParamInfillPatchmatchDownscaleSize from './ParamInfillPatchmatchDownscaleSize';
+import ParamInfillTilesize from './ParamInfillTilesize';
+
+const selector = createSelector(
+  [generationSelector],
+  (parameters) => {
+    const { infillMethod } = parameters;
+
+    return {
+      infillMethod,
+    };
+  },
+  defaultSelectorOptions
+);
+
+export default function ParamInfillOptions() {
+  const { infillMethod } = useAppSelector(selector);
+  return (
+    <Flex>
+      {infillMethod === 'tile' && <ParamInfillTilesize />}
+      {infillMethod === 'patchmatch' && <ParamInfillPatchmatchDownscaleSize />}
+    </Flex>
+  );
+}
@@ -0,0 +1,58 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import IAISlider from 'common/components/IAISlider';
+import { generationSelector } from 'features/parameters/store/generationSelectors';
+import { setInfillPatchmatchDownscaleSize } from 'features/parameters/store/generationSlice';
+import { memo, useCallback } from 'react';
+
+import { useTranslation } from 'react-i18next';
+
+const selector = createSelector(
+  [generationSelector],
+  (parameters) => {
+    const { infillPatchmatchDownscaleSize, infillMethod } = parameters;
+
+    return {
+      infillPatchmatchDownscaleSize,
+      infillMethod,
+    };
+  },
+  defaultSelectorOptions
+);
+
+const ParamInfillPatchmatchDownscaleSize = () => {
+  const dispatch = useAppDispatch();
+  const { infillPatchmatchDownscaleSize, infillMethod } =
+    useAppSelector(selector);
+
+  const { t } = useTranslation();
+
+  const handleChange = useCallback(
+    (v: number) => {
+      dispatch(setInfillPatchmatchDownscaleSize(v));
+    },
+    [dispatch]
+  );
+
+  const handleReset = useCallback(() => {
+    dispatch(setInfillPatchmatchDownscaleSize(2));
+  }, [dispatch]);
+
+  return (
+    <IAISlider
+      isDisabled={infillMethod !== 'patchmatch'}
+      label={t('parameters.patchmatchDownScaleSize')}
+      min={1}
+      max={10}
+      value={infillPatchmatchDownscaleSize}
+      onChange={handleChange}
+      withInput
+      withSliderMarks
+      withReset
+      handleReset={handleReset}
+    />
+  );
+};
+
+export default memo(ParamInfillPatchmatchDownscaleSize);
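For context, a purely illustrative sketch of how the slider value might be forwarded to the patchmatch infill node when the canvas graph is assembled. The `downscale` field name on the infill node and the helper below are assumptions for illustration; they are not part of this excerpt:

```ts
// Placeholder for the imported graph constant.
const INPAINT_INFILL = 'inpaint_infill';

// Hypothetical helper: build the patchmatch infill node, passing the UI's
// downscale size through to the (assumed) `downscale` field of the node.
const buildPatchmatchInfillNode = (infillPatchmatchDownscaleSize: number) => ({
  type: 'infill_patchmatch' as const,
  id: INPAINT_INFILL,
  downscale: infillPatchmatchDownscaleSize,
});
```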
@@ -3,7 +3,7 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
 import IAISlider from 'common/components/IAISlider';
 import { generationSelector } from 'features/parameters/store/generationSelectors';
-import { setTileSize } from 'features/parameters/store/generationSlice';
+import { setInfillTileSize } from 'features/parameters/store/generationSlice';
 import { memo, useCallback } from 'react';

 import { useTranslation } from 'react-i18next';
@@ -11,10 +11,10 @@ import { useTranslation } from 'react-i18next';
 const selector = createSelector(
   [generationSelector],
   (parameters) => {
-    const { tileSize, infillMethod } = parameters;
+    const { infillTileSize, infillMethod } = parameters;

     return {
-      tileSize,
+      infillTileSize,
       infillMethod,
     };
   },
@@ -23,19 +23,19 @@ const selector = createSelector(

 const ParamInfillTileSize = () => {
   const dispatch = useAppDispatch();
-  const { tileSize, infillMethod } = useAppSelector(selector);
+  const { infillTileSize, infillMethod } = useAppSelector(selector);

   const { t } = useTranslation();

   const handleChange = useCallback(
     (v: number) => {
-      dispatch(setTileSize(v));
+      dispatch(setInfillTileSize(v));
     },
     [dispatch]
   );

   const handleReset = useCallback(() => {
-    dispatch(setTileSize(32));
+    dispatch(setInfillTileSize(32));
   }, [dispatch]);

   return (
@@ -45,7 +45,7 @@ const ParamInfillTileSize = () => {
       min={16}
       max={64}
       sliderNumberInputProps={{ max: 256 }}
-      value={tileSize}
+      value={infillTileSize}
       onChange={handleChange}
       withInput
       withSliderMarks
@@ -7,6 +7,7 @@ import { ImageDTO } from 'services/api/types';

 import { clipSkipMap } from '../types/constants';
 import {
+  CanvasCoherenceModeParam,
   CfgScaleParam,
   HeightParam,
   MainModelParam,
@@ -37,6 +38,7 @@ export interface GenerationState {
   scheduler: SchedulerParam;
   maskBlur: number;
   maskBlurMethod: MaskBlurMethodParam;
+  canvasCoherenceMode: CanvasCoherenceModeParam;
   canvasCoherenceSteps: number;
   canvasCoherenceStrength: StrengthParam;
   seed: SeedParam;
@@ -47,7 +49,8 @@ export interface GenerationState {
   shouldUseNoiseSettings: boolean;
   steps: StepsParam;
   threshold: number;
-  tileSize: number;
+  infillTileSize: number;
+  infillPatchmatchDownscaleSize: number;
   variationAmount: number;
   width: WidthParam;
   shouldUseSymmetry: boolean;
@@ -77,6 +80,7 @@ export const initialGenerationState: GenerationState = {
   scheduler: 'euler',
   maskBlur: 16,
   maskBlurMethod: 'box',
+  canvasCoherenceMode: 'edge',
   canvasCoherenceSteps: 20,
   canvasCoherenceStrength: 0.3,
   seed: 0,
@@ -87,7 +91,8 @@ export const initialGenerationState: GenerationState = {
   shouldUseNoiseSettings: false,
   steps: 50,
   threshold: 0,
-  tileSize: 32,
+  infillTileSize: 32,
+  infillPatchmatchDownscaleSize: 1,
   variationAmount: 0.1,
   width: 512,
   shouldUseSymmetry: false,
@@ -206,18 +211,30 @@ export const generationSlice = createSlice({
     setMaskBlurMethod: (state, action: PayloadAction<MaskBlurMethodParam>) => {
       state.maskBlurMethod = action.payload;
     },
+    setCanvasCoherenceMode: (
+      state,
+      action: PayloadAction<CanvasCoherenceModeParam>
+    ) => {
+      state.canvasCoherenceMode = action.payload;
+    },
     setCanvasCoherenceSteps: (state, action: PayloadAction<number>) => {
       state.canvasCoherenceSteps = action.payload;
     },
     setCanvasCoherenceStrength: (state, action: PayloadAction<number>) => {
       state.canvasCoherenceStrength = action.payload;
     },
-    setTileSize: (state, action: PayloadAction<number>) => {
-      state.tileSize = action.payload;
-    },
     setInfillMethod: (state, action: PayloadAction<string>) => {
       state.infillMethod = action.payload;
     },
+    setInfillTileSize: (state, action: PayloadAction<number>) => {
+      state.infillTileSize = action.payload;
+    },
+    setInfillPatchmatchDownscaleSize: (
+      state,
+      action: PayloadAction<number>
+    ) => {
+      state.infillPatchmatchDownscaleSize = action.payload;
+    },
     setShouldUseSymmetry: (state, action: PayloadAction<boolean>) => {
       state.shouldUseSymmetry = action.payload;
     },
@@ -323,6 +340,7 @@ export const {
   setScheduler,
   setMaskBlur,
   setMaskBlurMethod,
+  setCanvasCoherenceMode,
   setCanvasCoherenceSteps,
   setCanvasCoherenceStrength,
   setSeed,
@@ -332,7 +350,8 @@ export const {
   setShouldRandomizeSeed,
   setSteps,
   setThreshold,
-  setTileSize,
+  setInfillTileSize,
+  setInfillPatchmatchDownscaleSize,
   setVariationAmount,
   setShouldUseSymmetry,
   setHorizontalSymmetrySteps,
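A brief usage sketch of the new slice actions outside a React component, assuming the app's configured Redux store is exported as `store` from `app/store/store` (an assumption; only `RootState` is imported from that module in this diff):

```ts
import { store } from 'app/store/store'; // assumed export of the configured store
import {
  setCanvasCoherenceMode,
  setInfillMethod,
  setInfillPatchmatchDownscaleSize,
} from 'features/parameters/store/generationSlice';

// Switch the coherence pass to the new mask-edge mode, select patchmatch
// infill, and downscale the patchmatch input by 2x before infilling.
store.dispatch(setCanvasCoherenceMode('edge'));
store.dispatch(setInfillMethod('patchmatch'));
store.dispatch(setInfillPatchmatchDownscaleSize(2));
```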
@@ -418,6 +418,22 @@ export const isValidMaskBlurMethod = (
   val: unknown
 ): val is MaskBlurMethodParam => zMaskBlurMethod.safeParse(val).success;

+/**
+ * Zod schema for a Canvas Coherence Mode method parameter
+ */
+export const zCanvasCoherenceMode = z.enum(['unmasked', 'mask', 'edge']);
+/**
+ * Type alias for Canvas Coherence Mode parameter, inferred from its zod schema
+ */
+export type CanvasCoherenceModeParam = z.infer<typeof zCanvasCoherenceMode>;
+/**
+ * Validates/type-guards a value as a Canvas Coherence Mode parameter
+ */
+export const isValidCoherenceModeParam = (
+  val: unknown
+): val is CanvasCoherenceModeParam =>
+  zCanvasCoherenceMode.safeParse(val).success;
+
 // /**
 // * Zod schema for BaseModelType
 // */
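A minimal sketch of how the new type guard could protect the store from bad values, for example when recalling parameters from image metadata. The `recallCoherenceMode` helper is hypothetical, and the `store` export is assumed as above:

```ts
import { isValidCoherenceModeParam } from 'features/parameters/types/parameterSchemas';
import { setCanvasCoherenceMode } from 'features/parameters/store/generationSlice';
import { store } from 'app/store/store'; // assumed export of the configured store

// Hypothetical recall helper: only values that pass the guard
// ('unmasked' | 'mask' | 'edge') are written into generation state.
const recallCoherenceMode = (value: unknown) => {
  if (!isValidCoherenceModeParam(value)) {
    return;
  }
  // `value` is narrowed to CanvasCoherenceModeParam here.
  store.dispatch(setCanvasCoherenceMode(value));
};

recallCoherenceMode('edge'); // dispatched
recallCoherenceMode('blur'); // ignored
```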
File diff suppressed because one or more lines are too long
@@ -112,6 +112,7 @@ export type ImageScaleInvocation = s['ImageScaleInvocation'];
 export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation'];
 export type InfillTileInvocation = s['InfillTileInvocation'];
+export type CreateDenoiseMaskInvocation = s['CreateDenoiseMaskInvocation'];
 export type MaskEdgeInvocation = s['MaskEdgeInvocation'];
 export type RandomIntInvocation = s['RandomIntInvocation'];
 export type CompelInvocation = s['CompelInvocation'];
 export type DynamicPromptInvocation = s['DynamicPromptInvocation'];