feat(ui): outpaint graph, organize builder a bit

This commit is contained in:
psychedelicious 2024-06-24 19:31:58 +10:00
parent b97b8c6ce6
commit ebc498ad19
6 changed files with 471 additions and 167 deletions

View File

@ -0,0 +1,56 @@
import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterStrength } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addImageToImage = async (
g: Graph,
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
strength: ParameterStrength
) => {
denoise.denoising_start = 1 - strength;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
preview: true,
});
if (!isEqual(scaledSize, originalSize)) {
// Resize the initial image to the scaled size, denoise, then resize back to the original size
const resizeImageToScaledSize = g.addNode({
id: 'initial_image_resize_in',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const resizeImageToOriginalSize = g.addNode({
id: 'initial_image_resize_out',
type: 'img_resize',
...originalSize,
});
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
// This is the new output node
imageOutput = resizeImageToOriginalSize;
} else {
// No need to resize, just denoise
const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } });
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
}
};

View File

@ -0,0 +1,137 @@
import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addInpaint = async (
g: Graph,
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader'>,
imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
compositing: CanvasV2State['compositing'],
strength: ParameterStrength,
vaePrecision: ParameterPrecision
) => {
denoise.denoising_start = 1 - strength;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
preview: true,
});
const maskImage = await manager.util.getInpaintMaskImage({
bbox: cropBbox,
preview: true,
});
if (!isEqual(scaledSize, originalSize)) {
// Scale before processing requires some resizing
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const resizeImageToScaledSize = g.addNode({
id: 'resize_image_to_scaled_size',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
const alphaToMask = g.addNode({
id: 'alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const resizeMaskToScaledSize = g.addNode({
id: 'resize_mask_to_scaled_size',
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: 'resize_image_to_original_size',
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: 'resize_mask_to_original_size',
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
});
// Resize initial image and mask to scaled size, feed into to gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(resizeImageToScaledSize, 'image', createGradientMask, 'image');
g.addEdge(resizeMaskToScaledSize, 'image', createGradientMask, 'mask');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
imageOutput = canvasPasteBack;
} else {
// No scale before processing, much simpler
const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } });
const alphaToMask = g.addNode({
id: 'alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
image: { image_name: initialImage.image_name },
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
mask: { image_name: maskImage.image_name },
});
g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
imageOutput = canvasPasteBack;
}
};

View File

@ -0,0 +1,152 @@
import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getInfill } from 'features/nodes/util/graph/graphBuilderUtils';
import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addOutpaint = async (
g: Graph,
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader'>,
imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
compositing: CanvasV2State['compositing'],
strength: ParameterStrength,
vaePrecision: ParameterPrecision
) => {
denoise.denoising_start = 1 - strength;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
preview: true,
});
const maskImage = await manager.util.getInpaintMaskImage({
bbox: cropBbox,
preview: true,
});
const infill = getInfill(g, compositing);
if (!isEqual(scaledSize, originalSize)) {
// Scale before processing requires some resizing
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const resizeImageToScaledSize = g.addNode({
id: 'resize_image_to_scaled_size',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
const alphaToMask = g.addNode({
id: 'alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const resizeMaskToScaledSize = g.addNode({
id: 'resize_mask_to_scaled_size',
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: 'resize_image_to_original_size',
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: 'resize_mask_to_original_size',
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
});
// Resize initial image and mask to scaled size, feed into to gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(resizeImageToScaledSize, 'image', createGradientMask, 'image');
g.addEdge(resizeMaskToScaledSize, 'image', createGradientMask, 'mask');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
imageOutput = canvasPasteBack;
} else {
infill.image = { image_name: initialImage.image_name };
// No scale before processing, much simpler
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const maskAlphaToMask = g.addNode({
id: 'mask_alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const initialImageAlphaToMask = g.addNode({
id: 'image_alpha_to_mask',
type: 'tomask',
image: { image_name: initialImage.image_name },
});
const maskCombine = g.addNode({
id: 'mask_combine',
type: 'mask_combine',
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
image: { image_name: initialImage.image_name },
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
mask: { image_name: maskImage.image_name },
});
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
g.addEdge(infill, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
g.addEdge(infill, 'image', canvasPasteBack, 'source_image');
g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
imageOutput = canvasPasteBack;
}
};

View File

@ -0,0 +1,25 @@
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
/**
 * Wires the text-to-image output stage into `g`. If the scaled processing size
 * differs from the original size, the decoded image is resized back to the
 * original size and that resize node becomes the output.
 *
 * Bug fix: the original implementation reassigned the `imageOutput` parameter,
 * which is a local binding in JS and never reaches the caller. The effective
 * output node is now RETURNED so the caller can adopt it; callers that ignore
 * the return value are unaffected.
 *
 * @param g - the graph being built
 * @param l2i - the latents-to-image decode node
 * @param imageOutput - the graph's current image output node
 * @param originalSize - the bbox's original dimensions
 * @param scaledSize - the (possibly different) processing dimensions
 * @returns the graph's image output node after this stage is wired in
 */
export const addTextToImage = (
  g: Graph,
  l2i: Invocation<'l2i'>,
  imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
  originalSize: Dimensions,
  scaledSize: Dimensions
): Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'> => {
  if (!isEqual(scaledSize, originalSize)) {
    // We need to resize the output image back to the original size
    const resizeImageToOriginalSize = g.addNode({
      id: 'resize_image_to_original_size',
      type: 'img_resize',
      ...originalSize,
    });
    g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
    // This is the new output node
    return resizeImageToOriginalSize;
  }
  // No resize needed; output node is unchanged.
  return imageOutput;
};

View File

@ -15,16 +15,19 @@ import {
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
// import { addHRF } from 'features/nodes/util/graph/generation/addHRF';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addLoRAs } from 'features/nodes/util/graph/generation/addLoRAs';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import type { GraphType } from 'features/nodes/util/graph/generation/Graph';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@ -155,178 +158,58 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
addLoRAs(state, g, denoise, modelLoader, seamless, clipSkip, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource = seamless ?? vaeLoader ?? modelLoader;
const vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'> = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
if (generationMode === 'txt2img') {
if (!isEqual(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({
id: 'resize_image_to_original_size',
type: 'img_resize',
...originalSize,
});
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
// This is the new output node
imageOutput = resizeImageToOriginalSize;
}
addTextToImage(g, l2i, imageOutput, originalSize, scaledSize);
} else if (generationMode === 'img2img') {
denoise.denoising_start = 1 - params.img2imgStrength;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
preview: true,
});
if (!isEqual(scaledSize, originalSize)) {
// Resize the initial image to the scaled size, denoise, then resize back to the original size
const resizeImageToScaledSize = g.addNode({
id: 'initial_image_resize_in',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const resizeImageToOriginalSize = g.addNode({
id: 'initial_image_resize_out',
type: 'img_resize',
...originalSize,
});
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
// This is the new output node
imageOutput = resizeImageToOriginalSize;
} else {
// No need to resize, just denoise
const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } });
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
}
addImageToImage(
g,
manager,
l2i,
denoise,
vaeSource,
imageOutput,
originalSize,
scaledSize,
bbox,
params.img2imgStrength
);
} else if (generationMode === 'inpaint') {
denoise.denoising_start = 1 - params.img2imgStrength;
const { compositing } = state.canvasV2;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
preview: true,
});
const maskImage = await manager.util.getInpaintMaskImage({
bbox: cropBbox,
preview: true,
});
if (!isEqual(scaledSize, originalSize)) {
// Scale before processing requires some resizing
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
const resizeImageToScaledSize = g.addNode({
id: 'resize_image_to_scaled_size',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
const alphaToMask = g.addNode({
id: 'alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const resizeMaskToScaledSize = g.addNode({
id: 'resize_mask_to_scaled_size',
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: 'resize_image_to_original_size',
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: 'resize_mask_to_original_size',
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
board: getBoardField(state),
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
});
// Resize initial image and mask to scaled size, feed into to gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(resizeImageToScaledSize, 'image', createGradientMask, 'image');
g.addEdge(resizeMaskToScaledSize, 'image', createGradientMask, 'mask');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
imageOutput = canvasPasteBack;
} else {
// No scale before processing, much simpler
const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } });
const alphaToMask = g.addNode({
id: 'alpha_to_mask',
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: true,
});
const createGradientMask = g.addNode({
id: 'create_gradient_mask',
type: 'create_gradient_mask',
coherence_mode: compositing.canvasCoherenceMode,
minimum_denoise: compositing.canvasCoherenceMinDenoise,
edge_radius: compositing.canvasCoherenceEdgeSize,
fp32: vaePrecision === 'fp32',
image: { image_name: initialImage.image_name },
});
const canvasPasteBack = g.addNode({
id: 'canvas_paste_back',
type: 'canvas_paste_back',
board: getBoardField(state),
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
mask: { image_name: maskImage.image_name },
});
g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
imageOutput = canvasPasteBack;
}
addInpaint(
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
imageOutput,
originalSize,
scaledSize,
bbox,
compositing,
params.img2imgStrength,
vaePrecision
);
} else if (generationMode === 'outpaint') {
const { compositing } = state.canvasV2;
addOutpaint(
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
imageOutput,
originalSize,
scaledSize,
bbox,
compositing,
params.img2imgStrength,
vaePrecision
);
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);

View File

@ -1,10 +1,13 @@
import type { RootState } from 'app/store/store';
import type { CanvasV2State } from 'features/controlLayers/store/types';
import type { BoardField } from 'features/nodes/types/common';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { buildPresetModifiedPrompt } from 'features/stylePresets/hooks/usePresetModifiedPrompts';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { pick } from 'lodash-es';
import { stylePresetsApi } from 'services/api/endpoints/stylePresets';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';
/**
* Gets the board field, based on the autoAddBoardId setting.
@ -76,3 +79,51 @@ export const getSizes = (bboxState: CanvasV2State['bbox']) => {
const scaledSize = ['auto', 'manual'].includes(bboxState.scaleMethod) ? bboxState.scaledSize : originalSize;
return { originalSize, scaledSize };
};
/**
 * Adds the infill node matching the compositing state's selected infill method
 * and returns it. Asserts (throws) on an unrecognized method.
 *
 * @param g - the graph being built
 * @param compositing - compositing state supplying the method and its settings
 * @returns the newly-added infill node
 */
export const getInfill = (
  g: Graph,
  compositing: CanvasV2State['compositing']
): Invocation<'infill_patchmatch' | 'infill_cv2' | 'infill_lama' | 'infill_rgba' | 'infill_tile'> => {
  const { infillMethod, infillColorValue, infillPatchmatchDownscaleSize, infillTileSize } = compositing;

  // One case per supported infill method; each builds its node with the
  // settings that method needs.
  switch (infillMethod) {
    case 'patchmatch':
      return g.addNode({
        id: 'infill_patchmatch',
        type: 'infill_patchmatch',
        downscale: infillPatchmatchDownscaleSize,
      });
    case 'lama':
      return g.addNode({
        id: 'infill_lama',
        type: 'infill_lama',
      });
    case 'cv2':
      return g.addNode({
        id: 'infill_cv2',
        type: 'infill_cv2',
      });
    case 'tile':
      return g.addNode({
        id: 'infill_tile',
        type: 'infill_tile',
        tile_size: infillTileSize,
      });
    case 'color':
      return g.addNode({
        id: 'infill_rgba',
        type: 'infill_rgba',
        color: infillColorValue,
      });
    default:
      assert(false, 'Unknown infill method');
  }
};