feat(ui): sdxl graphs

psychedelicious 2024-06-25 15:14:54 +10:00
parent a62b4a26ef
commit fbfdd3e003
7 changed files with 88 additions and 105 deletions

View File

@ -1,7 +1,6 @@
import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterStrength } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
@ -10,14 +9,13 @@ export const addImageToImage = async (
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
strength: ParameterStrength
denoising_start: number
): Promise<Invocation<'img_resize' | 'l2i'>> => {
denoise.denoising_start = 1 - strength;
denoise.denoising_start = denoising_start;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
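
For readability, here is a sketch of the helper's signature after this change, reconstructed from the hunks above; the leading parameters outside the hunk are assumed unchanged (the call sites later in this commit pass `g` and `manager` first):

```ts
// Reconstructed sketch, not verbatim source: the helper now accepts a
// precomputed denoising_start instead of a strength, no longer takes the
// imageOutput node, and accepts an SDXL model loader as a VAE source.
export const addImageToImage = async (
  g: Graph,
  manager: KonvaNodeManager,
  l2i: Invocation<'l2i'>,
  denoise: Invocation<'denoise_latents'>,
  vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
  originalSize: Dimensions,
  scaledSize: Dimensions,
  bbox: CanvasV2State['bbox'],
  denoising_start: number
): Promise<Invocation<'img_resize' | 'l2i'>> => {
  denoise.denoising_start = denoising_start;
  // ... unchanged body: crop the bbox, fetch the initial image, wire i2l/resize nodes
};
```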

View File

@ -1,7 +1,7 @@
import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
import type { ParameterPrecision } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
@ -10,16 +10,16 @@ export const addInpaint = async (
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
compositing: CanvasV2State['compositing'],
strength: ParameterStrength,
denoising_start: number,
vaePrecision: ParameterPrecision
): Promise<Invocation<'canvas_paste_back'>> => {
denoise.denoising_start = 1 - strength;
denoise.denoising_start = denoising_start;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
@ -121,7 +121,6 @@ export const addInpaint = async (
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
source_image: { image_name: initialImage.image_name },
mask: { image_name: maskImage.image_name },
});
g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
g.addEdge(i2l, 'latents', denoise, 'latents');
@ -129,6 +128,8 @@ export const addInpaint = async (
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
return canvasPasteBack;

View File

@ -10,21 +10,13 @@ import type { Invocation } from 'services/api/types';
*/
export const addNSFWChecker = (
g: Graph,
imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'>
imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
): Invocation<'img_nsfw'> => {
const nsfw = g.addNode({
id: NSFW_CHECKER,
type: 'img_nsfw',
is_intermediate: imageOutput.is_intermediate,
board: imageOutput.board,
use_cache: false,
});
// The NSFW checker node is the new image output - make the previous one intermediate
imageOutput.is_intermediate = true;
imageOutput.use_cache = true;
imageOutput.board = undefined;
g.addEdge(imageOutput, 'image', nsfw, 'image');
return nsfw;
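
With the board/intermediate bookkeeping removed from addNSFWChecker (and from the identically reworked addWatermarker below), the graph builders are expected to chain these nodes and then finalize whichever node ends up terminal. A minimal usage sketch, assuming the existing shouldUseNSFWChecker/shouldUseWatermarker system flags:

```ts
// Sketch of the new pattern (flag names assumed; the finalization mirrors the
// buildSDXLGraph change at the end of this commit).
let imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'> = l2i;

if (state.system.shouldUseNSFWChecker) {
  imageOutput = addNSFWChecker(g, imageOutput);
}
if (state.system.shouldUseWatermarker) {
  imageOutput = addWatermarker(g, imageOutput);
}

// Only the terminal node saves to the gallery; everything upstream stays intermediate.
imageOutput.is_intermediate = false;
imageOutput.use_cache = false;
imageOutput.board = getBoardField(state);
g.setMetadataReceivingNode(imageOutput);
```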

View File

@ -2,7 +2,7 @@ import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager'
import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getInfill } from 'features/nodes/util/graph/graphBuilderUtils';
import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
import type { ParameterPrecision } from 'features/parameters/types/parameterSchemas';
import { isEqual, pick } from 'lodash-es';
import type { Invocation } from 'services/api/types';
@ -11,17 +11,15 @@ export const addOutpaint = async (
manager: KonvaNodeManager,
l2i: Invocation<'l2i'>,
denoise: Invocation<'denoise_latents'>,
vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasV2State['bbox'],
compositing: CanvasV2State['compositing'],
strength: ParameterStrength,
denoising_start: number,
vaePrecision: ParameterPrecision
): Promise<Invocation<'canvas_paste_back'>> => {
denoise.denoising_start = 1 - strength;
const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
const initialImage = await manager.util.getImageSourceImage({
bbox: cropBbox,
@ -56,21 +54,21 @@ export const addOutpaint = async (
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
// Resize the combined and initial image to the scaled size
const resizeMaskToScaledSize = g.addNode({
const resizeInputMaskToScaledSize = g.addNode({
id: 'resize_mask_to_scaled_size',
type: 'img_resize',
...scaledSize,
});
g.addEdge(maskCombine, 'image', resizeMaskToScaledSize, 'image');
g.addEdge(maskCombine, 'image', resizeInputMaskToScaledSize, 'image');
// Resize the initial image to the scaled size and infill
const resizeImageToScaledSize = g.addNode({
const resizeInputImageToScaledSize = g.addNode({
id: 'resize_image_to_scaled_size',
type: 'img_resize',
image: { image_name: initialImage.image_name },
...scaledSize,
});
g.addEdge(resizeImageToScaledSize, 'image', infill, 'image');
g.addEdge(resizeInputImageToScaledSize, 'image', infill, 'image');
// Create the gradient denoising mask from the combined mask
const createGradientMask = g.addNode({
@ -82,7 +80,7 @@ export const addOutpaint = async (
fp32: vaePrecision === 'fp32',
});
g.addEdge(infill, 'image', createGradientMask, 'image');
g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
g.addEdge(resizeInputMaskToScaledSize, 'image', createGradientMask, 'mask');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
@ -94,12 +92,12 @@ export const addOutpaint = async (
g.addEdge(i2l, 'latents', denoise, 'latents');
// Resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({
const resizeOutputImageToOriginalSize = g.addNode({
id: 'resize_image_to_original_size',
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
const resizeOutputMaskToOriginalSize = g.addNode({
id: 'resize_mask_to_original_size',
type: 'img_resize',
...originalSize,
@ -114,12 +112,12 @@ export const addOutpaint = async (
// Resize initial image and mask to scaled size, feed into gradient mask
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
return canvasPasteBack;
} else {
@ -154,7 +152,6 @@ export const addOutpaint = async (
id: 'canvas_paste_back',
type: 'canvas_paste_back',
mask_blur: compositing.maskBlur,
mask: { image_name: maskImage.image_name },
});
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
@ -165,6 +162,7 @@ export const addOutpaint = async (
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
g.addEdge(infill, 'image', canvasPasteBack, 'source_image');
g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');

View File

@ -10,21 +10,13 @@ import type { Invocation } from 'services/api/types';
*/
export const addWatermarker = (
g: Graph,
imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'>
imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
): Invocation<'img_watermark'> => {
const watermark = g.addNode({
id: WATERMARKER,
type: 'img_watermark',
is_intermediate: imageOutput.is_intermediate,
board: imageOutput.board,
use_cache: false,
});
// The watermarker node is the new image output - make the previous one intermediate
imageOutput.is_intermediate = true;
imageOutput.use_cache = true;
imageOutput.board = undefined;
g.addEdge(imageOutput, 'image', watermark, 'image');
return watermark;

View File

@ -157,7 +157,9 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
addLoRAs(state, g, denoise, modelLoader, seamless, clipSkip, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'> = seamless ?? vaeLoader ?? modelLoader;
const vaeSource: Invocation<
'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'
> = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
if (generationMode === 'txt2img') {
@ -169,11 +171,10 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
l2i,
denoise,
vaeSource,
imageOutput,
originalSize,
scaledSize,
bbox,
params.img2imgStrength
1 - params.img2imgStrength
);
} else if (generationMode === 'inpaint') {
const { compositing } = state.canvasV2;
@ -188,7 +189,7 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
scaledSize,
bbox,
compositing,
params.img2imgStrength,
1 - params.img2imgStrength,
vaePrecision
);
} else if (generationMode === 'outpaint') {
@ -204,7 +205,7 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
scaledSize,
bbox,
compositing,
params.img2imgStrength,
1 - params.img2imgStrength,
vaePrecision
);
}
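
The builders now convert image-to-image strength into denoising_start at the call site instead of inside each helper. A small sketch of the conversion and what it means for the schedule:

```ts
// denoising_start is the complement of strength: the denoiser skips the first
// (1 - strength) fraction of the schedule and refines the rest.
const toDenoisingStart = (img2imgStrength: number): number => 1 - img2imgStrength;

toDenoisingStart(1.0);  // 0.0  - denoise from scratch; the init image is effectively ignored
toDenoisingStart(0.75); // 0.25 - run the last 75% of the steps
toDenoisingStart(0.3);  // 0.7  - only the last 30% of the steps run; output stays close to the input
```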

View File

@ -14,15 +14,18 @@ import {
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addSDXLLoRAs } from 'features/nodes/util/graph/generation/addSDXLLoRAs';
import { addSDXLRefiner } from 'features/nodes/util/graph/generation/addSDXLRefiner';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getSDXLStylePrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual, pick } from 'lodash-es';
import type { Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@ -48,7 +51,6 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
negativePrompt,
refinerModel,
refinerStart,
img2imgStrength,
} = params;
assert(model, 'No model found in state');
@ -105,10 +107,6 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32: vaePrecision === 'fp32',
board: getBoardField(state),
// This is the terminal node and must always save to gallery.
is_intermediate: false,
use_cache: false,
});
const vaeLoader =
vae?.base === model.base
@ -119,8 +117,7 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
})
: null;
let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
l2i;
let imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
@ -169,52 +166,51 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
}
if (generationMode === 'txt2img') {
if (!isEqual(scaledSize, originalSize)) {
// We are using scaled bbox and need to resize the output image back to the original size.
imageOutput = g.addNode({
id: 'img_resize',
type: 'img_resize',
...originalSize,
is_intermediate: false,
use_cache: false,
});
g.addEdge(l2i, 'image', imageOutput, 'image');
}
imageOutput = addTextToImage(g, l2i, originalSize, scaledSize);
} else if (generationMode === 'img2img') {
denoise.denoising_start = refinerModel ? Math.min(refinerStart, 1 - img2imgStrength) : 1 - img2imgStrength;
const { image_name } = await manager.util.getImageSourceImage({
bbox: pick(bbox, ['x', 'y', 'width', 'height']),
preview: true,
});
if (!isEqual(scaledSize, originalSize)) {
// We are using scaled bbox and need to resize the output image back to the original size.
const initialImageResize = g.addNode({
id: 'initial_image_resize',
type: 'img_resize',
...scaledSize,
image: { image_name },
});
const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(initialImageResize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
imageOutput = g.addNode({
id: 'img_resize',
type: 'img_resize',
...originalSize,
is_intermediate: false,
use_cache: false,
});
g.addEdge(l2i, 'image', imageOutput, 'image');
} else {
const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name } });
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
}
imageOutput = await addImageToImage(
g,
manager,
l2i,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength
);
} else if (generationMode === 'inpaint') {
const { compositing } = state.canvasV2;
imageOutput = await addInpaint(
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
bbox,
compositing,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
vaePrecision
);
} else if (generationMode === 'outpaint') {
const { compositing } = state.canvasV2;
imageOutput = await addOutpaint(
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
bbox,
compositing,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
vaePrecision
);
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
@ -241,6 +237,11 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
imageOutput = addWatermarker(g, imageOutput);
}
// This is the terminal node and must always save to gallery.
imageOutput.is_intermediate = false;
imageOutput.use_cache = false;
imageOutput.board = getBoardField(state);
g.setMetadataReceivingNode(imageOutput);
return g.getGraph();
};
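
For the SDXL graph, the same conversion is additionally clamped by the refiner start, so a low strength cannot push the start point past where the refiner takes over. A sketch of the expression used in the img2img/inpaint/outpaint branches above, with example values:

```ts
// Same expression as in the branches above; values below are illustrative.
const denoisingStart = refinerModel
  ? Math.min(refinerStart, 1 - params.img2imgStrength)
  : 1 - params.img2imgStrength;

// e.g. refinerStart = 0.8:
//   strength 0.5 -> min(0.8, 0.5) = 0.5 (strength determines the start point)
//   strength 0.1 -> min(0.8, 0.9) = 0.8 (capped: the start never falls after refinerStart)
```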