From 84aa4fb7bc05c2e9fc430c3d93f5ae75cbef43d3 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Tue, 25 Jun 2024 15:14:54 +1000
Subject: [PATCH] feat(ui): sdxl graphs

---
 .../util/graph/generation/addImageToImage.ts  |   8 +-
 .../nodes/util/graph/generation/addInpaint.ts |  13 ++-
 .../util/graph/generation/addNSFWChecker.ts   |  10 +-
 .../util/graph/generation/addOutpaint.ts      |  34 +++---
 .../util/graph/generation/addWatermarker.ts   |  10 +-
 .../util/graph/generation/buildSD1Graph.ts    |  11 +-
 .../util/graph/generation/buildSDXLGraph.ts   | 107 +++++++++---------
 7 files changed, 88 insertions(+), 105 deletions(-)

diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
index a57036d366..3a839bf3a8 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
@@ -1,7 +1,6 @@
 import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
 import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
-import type { ParameterStrength } from 'features/parameters/types/parameterSchemas';
 import { isEqual, pick } from 'lodash-es';
 import type { Invocation } from 'services/api/types';
 
@@ -10,14 +9,13 @@ export const addImageToImage = async (
   manager: KonvaNodeManager,
   l2i: Invocation<'l2i'>,
   denoise: Invocation<'denoise_latents'>,
-  vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
-  imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>,
+  vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
   originalSize: Dimensions,
   scaledSize: Dimensions,
   bbox: CanvasV2State['bbox'],
-  strength: ParameterStrength
+  denoising_start: number
 ): Promise<Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>> => {
-  denoise.denoising_start = 1 - strength;
+  denoise.denoising_start = denoising_start;
 
   const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
   const initialImage = await manager.util.getImageSourceImage({
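The helper no longer derives denoising_start from an image-to-image strength; callers now pass the value directly, and the builders later in this patch compute it as 1 - strength. A minimal sketch of that conversion (illustrative only; the helper name is not part of the patch):

    // Strength is the fraction of the schedule that gets re-noised, so denoising
    // starts at 1 - strength. A strength of 0.7 yields denoising_start = 0.3.
    const toDenoisingStart = (img2imgStrength: number): number => 1 - img2imgStrength;

    toDenoisingStart(0.7); // 0.3, passed straight into addImageToImage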
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
index 48cf324901..8c7c7b7d7c 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
@@ -1,7 +1,7 @@
 import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager';
 import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
-import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
+import type { ParameterPrecision } from 'features/parameters/types/parameterSchemas';
 import { isEqual, pick } from 'lodash-es';
 import type { Invocation } from 'services/api/types';
 
@@ -10,16 +10,16 @@ export const addInpaint = async (
   manager: KonvaNodeManager,
   l2i: Invocation<'l2i'>,
   denoise: Invocation<'denoise_latents'>,
-  vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
-  modelLoader: Invocation<'main_model_loader'>,
+  vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
+  modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader'>,
   originalSize: Dimensions,
   scaledSize: Dimensions,
   bbox: CanvasV2State['bbox'],
   compositing: CanvasV2State['compositing'],
-  strength: ParameterStrength,
+  denoising_start: number,
   vaePrecision: ParameterPrecision
 ): Promise<Invocation<'canvas_paste_back'>> => {
-  denoise.denoising_start = 1 - strength;
+  denoise.denoising_start = denoising_start;
 
   const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
   const initialImage = await manager.util.getImageSourceImage({
@@ -121,7 +121,6 @@ export const addInpaint = async (
       type: 'canvas_paste_back',
       mask_blur: compositing.maskBlur,
       source_image: { image_name: initialImage.image_name },
-      mask: { image_name: maskImage.image_name },
     });
     g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
     g.addEdge(i2l, 'latents', denoise, 'latents');
@@ -129,6 +128,8 @@ export const addInpaint = async (
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
     g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
     g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
+    g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
+    g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
 
     return canvasPasteBack;
 
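With this change the compositing mask for inpainting comes from the create_gradient_mask node's expanded_mask_area output instead of a pre-rendered mask image, so canvas_paste_back loses its literal mask field. A minimal wiring sketch using the same Graph API the patch uses (the function name is an assumption for illustration):

    import type { Graph } from 'features/nodes/util/graph/generation/Graph';
    import type { Invocation } from 'services/api/types';

    // Route the runtime-generated gradient mask into canvas_paste_back, replacing
    // the old static `mask: { image_name: ... }` field.
    const wireCompositingMask = (
      g: Graph,
      createGradientMask: Invocation<'create_gradient_mask'>,
      canvasPasteBack: Invocation<'canvas_paste_back'>
    ): void => {
      g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
    };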
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
index 939aa6894c..5a3bb741f5 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
@@ -10,21 +10,13 @@ import type { Invocation } from 'services/api/types';
  */
 export const addNSFWChecker = (
   g: Graph,
-  imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'>
+  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
 ): Invocation<'img_nsfw'> => {
   const nsfw = g.addNode({
     id: NSFW_CHECKER,
     type: 'img_nsfw',
-    is_intermediate: imageOutput.is_intermediate,
-    board: imageOutput.board,
-    use_cache: false,
   });
 
-  // The NSFW checker node is the new image output - make the previous one intermediate
-  imageOutput.is_intermediate = true;
-  imageOutput.use_cache = true;
-  imageOutput.board = undefined;
-
   g.addEdge(imageOutput, 'image', nsfw, 'image');
 
   return nsfw;
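addNSFWChecker (and addWatermarker below) now only append their node and return it; choosing the gallery output and setting is_intermediate, use_cache, and board is left to the graph builder, which finalizes whichever node ends up last. A sketch of that builder-side pattern, mirroring the SDXL builder at the end of this patch (the function name and flags are illustrative):

    import type { Graph } from 'features/nodes/util/graph/generation/Graph';
    import type { Invocation } from 'services/api/types';
    import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
    import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';

    const finalizeImageOutput = (
      g: Graph,
      l2i: Invocation<'l2i'>,
      useNSFWChecker: boolean,
      useWatermarker: boolean
    ) => {
      // Chain optional post-processing nodes; each helper returns the new tail.
      let imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'> = l2i;
      if (useNSFWChecker) {
        imageOutput = addNSFWChecker(g, imageOutput);
      }
      if (useWatermarker) {
        imageOutput = addWatermarker(g, imageOutput);
      }
      // Only the terminal node saves to the gallery and skips the cache.
      imageOutput.is_intermediate = false;
      imageOutput.use_cache = false;
      return imageOutput;
    };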
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
index dad18653c1..86a37f85df 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
@@ -2,7 +2,7 @@ import type { KonvaNodeManager } from 'features/controlLayers/konva/nodeManager'
 import type { CanvasV2State, Dimensions } from 'features/controlLayers/store/types';
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
 import { getInfill } from 'features/nodes/util/graph/graphBuilderUtils';
-import type { ParameterPrecision, ParameterStrength } from 'features/parameters/types/parameterSchemas';
+import type { ParameterPrecision } from 'features/parameters/types/parameterSchemas';
 import { isEqual, pick } from 'lodash-es';
 import type { Invocation } from 'services/api/types';
 
@@ -11,17 +11,15 @@ export const addOutpaint = async (
   manager: KonvaNodeManager,
   l2i: Invocation<'l2i'>,
   denoise: Invocation<'denoise_latents'>,
-  vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>,
-  modelLoader: Invocation<'main_model_loader'>,
+  vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>,
+  modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader'>,
   originalSize: Dimensions,
   scaledSize: Dimensions,
   bbox: CanvasV2State['bbox'],
   compositing: CanvasV2State['compositing'],
-  strength: ParameterStrength,
+  denoising_start: number,
   vaePrecision: ParameterPrecision
 ): Promise<Invocation<'canvas_paste_back'>> => {
-  denoise.denoising_start = 1 - strength;
-
   const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']);
   const initialImage = await manager.util.getImageSourceImage({
     bbox: cropBbox,
@@ -56,21 +54,21 @@ export const addOutpaint = async (
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
 
     // Resize the combined and initial image to the scaled size
-    const resizeMaskToScaledSize = g.addNode({
+    const resizeInputMaskToScaledSize = g.addNode({
       id: 'resize_mask_to_scaled_size',
       type: 'img_resize',
       ...scaledSize,
     });
-    g.addEdge(maskCombine, 'image', resizeMaskToScaledSize, 'image');
+    g.addEdge(maskCombine, 'image', resizeInputMaskToScaledSize, 'image');
 
     // Resize the initial image to the scaled size and infill
-    const resizeImageToScaledSize = g.addNode({
+    const resizeInputImageToScaledSize = g.addNode({
       id: 'resize_image_to_scaled_size',
       type: 'img_resize',
       image: { image_name: initialImage.image_name },
       ...scaledSize,
     });
-    g.addEdge(resizeImageToScaledSize, 'image', infill, 'image');
+    g.addEdge(resizeInputImageToScaledSize, 'image', infill, 'image');
 
     // Create the gradient denoising mask from the combined mask
     const createGradientMask = g.addNode({
@@ -82,7 +80,7 @@ export const addOutpaint = async (
       fp32: vaePrecision === 'fp32',
     });
     g.addEdge(infill, 'image', createGradientMask, 'image');
-    g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
+    g.addEdge(resizeInputMaskToScaledSize, 'image', createGradientMask, 'mask');
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
     g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
     g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
@@ -94,12 +92,12 @@ export const addOutpaint = async (
     g.addEdge(i2l, 'latents', denoise, 'latents');
 
     // Resize the output image back to the original size
-    const resizeImageToOriginalSize = g.addNode({
+    const resizeOutputImageToOriginalSize = g.addNode({
       id: 'resize_image_to_original_size',
       type: 'img_resize',
       ...originalSize,
     });
-    const resizeMaskToOriginalSize = g.addNode({
+    const resizeOutputMaskToOriginalSize = g.addNode({
       id: 'resize_mask_to_original_size',
       type: 'img_resize',
       ...originalSize,
@@ -114,12 +112,12 @@ export const addOutpaint = async (
 
     // Resize initial image and mask to scaled size, feed into to gradient mask
     // After denoising, resize the image and mask back to original size
-    g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
-    g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
+    g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
+    g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
 
     // Finally, paste the generated masked image back onto the original image
-    g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
-    g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
+    g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'target_image');
+    g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
 
     return canvasPasteBack;
   } else {
@@ -154,7 +152,6 @@ export const addOutpaint = async (
      id: 'canvas_paste_back',
      type: 'canvas_paste_back',
      mask_blur: compositing.maskBlur,
-      mask: { image_name: maskImage.image_name },
     });
     g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
@@ -165,6 +162,7 @@ export const addOutpaint = async (
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
     g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
     g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
+    g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
     g.addEdge(infill, 'image', canvasPasteBack, 'source_image');
     g.addEdge(l2i, 'image', canvasPasteBack, 'target_image');
 
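Besides renaming the resize nodes to distinguish the input-side resizes (to the scaled bbox) from the output-side resizes (back to the original size), the scaled outpaint path now builds the gradient mask from the resized mask rather than the full-size maskCombine output. A minimal sketch of the corrected edge (variable names as in the patch; the function name is assumed for illustration):

    import type { Graph } from 'features/nodes/util/graph/generation/Graph';
    import type { Invocation } from 'services/api/types';

    const wireScaledGradientMask = (
      g: Graph,
      resizeInputMaskToScaledSize: Invocation<'img_resize'>,
      createGradientMask: Invocation<'create_gradient_mask'>
    ): void => {
      // The mask must match the dimensions of the scaled, infilled image being
      // denoised; previously this edge came from the un-resized maskCombine node.
      g.addEdge(resizeInputMaskToScaledSize, 'image', createGradientMask, 'mask');
    };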
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
index 9111a77630..9cd197a38c 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
@@ -10,21 +10,13 @@ import type { Invocation } from 'services/api/types';
  */
 export const addWatermarker = (
   g: Graph,
-  imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'>
+  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
 ): Invocation<'img_watermark'> => {
   const watermark = g.addNode({
     id: WATERMARKER,
     type: 'img_watermark',
-    is_intermediate: imageOutput.is_intermediate,
-    board: imageOutput.board,
-    use_cache: false,
   });
 
-  // The watermarker node is the new image output - make the previous one intermediate
-  imageOutput.is_intermediate = true;
-  imageOutput.use_cache = true;
-  imageOutput.board = undefined;
-
   g.addEdge(imageOutput, 'image', watermark, 'image');
 
   return watermark;
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts
index 410ce19cdc..894ea19d84 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts
@@ -157,7 +157,9 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
   addLoRAs(state, g, denoise, modelLoader, seamless, clipSkip, posCond, negCond);
 
   // We might get the VAE from the main model, custom VAE, or seamless node.
-  const vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'> = seamless ?? vaeLoader ?? modelLoader;
+  const vaeSource: Invocation<
+    'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'
+  > = seamless ?? vaeLoader ?? modelLoader;
   g.addEdge(vaeSource, 'vae', l2i, 'vae');
 
   if (generationMode === 'txt2img') {
@@ -169,11 +171,10 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
       l2i,
       denoise,
       vaeSource,
-      imageOutput,
       originalSize,
       scaledSize,
       bbox,
-      params.img2imgStrength
+      1 - params.img2imgStrength
     );
   } else if (generationMode === 'inpaint') {
     const { compositing } = state.canvasV2;
@@ -188,7 +189,7 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
       scaledSize,
       bbox,
       compositing,
-      params.img2imgStrength,
+      1 - params.img2imgStrength,
       vaePrecision
     );
   } else if (generationMode === 'outpaint') {
@@ -204,7 +205,7 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager)
       scaledSize,
       bbox,
       compositing,
-      params.img2imgStrength,
+      1 - params.img2imgStrength,
       vaePrecision
     );
   }
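Both builders resolve the VAE source with the same fallback chain: a seamless wrapper takes priority, then an explicit VAE loader, then the VAE bundled with the main or SDXL model loader. A small sketch of that resolution (the function name is an assumption; the expression matches the patch):

    import type { Invocation } from 'services/api/types';

    type VaeSource = Invocation<'main_model_loader' | 'sdxl_model_loader' | 'seamless' | 'vae_loader'>;

    const resolveVaeSource = (
      seamless: Invocation<'seamless'> | null,
      vaeLoader: Invocation<'vae_loader'> | null,
      modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader'>
    ): VaeSource => seamless ?? vaeLoader ?? modelLoader;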
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSDXLGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSDXLGraph.ts
index 5523f77795..e8591a9b95 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSDXLGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSDXLGraph.ts
@@ -14,15 +14,18 @@
   VAE_LOADER,
 } from 'features/nodes/util/graph/constants';
 import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
+import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
+import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
 import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
 import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
+import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
 import { addSDXLLoRAs } from 'features/nodes/util/graph/generation/addSDXLLoRAs';
 import { addSDXLRefiner } from 'features/nodes/util/graph/generation/addSDXLRefiner';
 import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
+import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
 import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
 import { Graph } from 'features/nodes/util/graph/generation/Graph';
 import { getBoardField, getSDXLStylePrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
-import { isEqual, pick } from 'lodash-es';
 import type { Invocation, NonNullableGraph } from 'services/api/types';
 import { isNonRefinerMainModelConfig } from 'services/api/types';
 import { assert } from 'tsafe';
@@ -48,7 +51,6 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
     negativePrompt,
     refinerModel,
     refinerStart,
-    img2imgStrength,
   } = params;
 
   assert(model, 'No model found in state');
@@ -105,10 +107,6 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
     type: 'l2i',
     id: LATENTS_TO_IMAGE,
     fp32: vaePrecision === 'fp32',
-    board: getBoardField(state),
-    // This is the terminal node and must always save to gallery.
-    is_intermediate: false,
-    use_cache: false,
   });
   const vaeLoader =
     vae?.base === model.base
@@ -119,8 +117,7 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
         })
       : null;
 
-  let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
-    l2i;
+  let imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'> = l2i;
 
   g.addEdge(modelLoader, 'unet', denoise, 'unet');
   g.addEdge(modelLoader, 'clip', posCond, 'clip');
@@ -169,52 +166,51 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
   }
 
   if (generationMode === 'txt2img') {
-    if (!isEqual(scaledSize, originalSize)) {
-      // We are using scaled bbox and need to resize the output image back to the original size.
-      imageOutput = g.addNode({
-        id: 'img_resize',
-        type: 'img_resize',
-        ...originalSize,
-        is_intermediate: false,
-        use_cache: false,
-      });
-      g.addEdge(l2i, 'image', imageOutput, 'image');
-    }
+    imageOutput = addTextToImage(g, l2i, originalSize, scaledSize);
   } else if (generationMode === 'img2img') {
-    denoise.denoising_start = refinerModel ? Math.min(refinerStart, 1 - img2imgStrength) : 1 - img2imgStrength;
-
-    const { image_name } = await manager.util.getImageSourceImage({
-      bbox: pick(bbox, ['x', 'y', 'width', 'height']),
-      preview: true,
-    });
-
-    if (!isEqual(scaledSize, originalSize)) {
-      // We are using scaled bbox and need to resize the output image back to the original size.
-      const initialImageResize = g.addNode({
-        id: 'initial_image_resize',
-        type: 'img_resize',
-        ...scaledSize,
-        image: { image_name },
-      });
-      const i2l = g.addNode({ id: 'i2l', type: 'i2l' });
-
-      g.addEdge(vaeSource, 'vae', i2l, 'vae');
-      g.addEdge(initialImageResize, 'image', i2l, 'image');
-      g.addEdge(i2l, 'latents', denoise, 'latents');
-
-      imageOutput = g.addNode({
-        id: 'img_resize',
-        type: 'img_resize',
-        ...originalSize,
-        is_intermediate: false,
-        use_cache: false,
-      });
-      g.addEdge(l2i, 'image', imageOutput, 'image');
-    } else {
-      const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name } });
-      g.addEdge(vaeSource, 'vae', i2l, 'vae');
-      g.addEdge(i2l, 'latents', denoise, 'latents');
-    }
+    imageOutput = await addImageToImage(
+      g,
+      manager,
+      l2i,
+      denoise,
+      vaeSource,
+      originalSize,
+      scaledSize,
+      bbox,
+      refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength
+    );
+  } else if (generationMode === 'inpaint') {
+    const { compositing } = state.canvasV2;
+    imageOutput = await addInpaint(
+      g,
+      manager,
+      l2i,
+      denoise,
+      vaeSource,
+      modelLoader,
+      originalSize,
+      scaledSize,
+      bbox,
+      compositing,
+      refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
+      vaePrecision
+    );
+  } else if (generationMode === 'outpaint') {
+    const { compositing } = state.canvasV2;
+    imageOutput = await addOutpaint(
+      g,
+      manager,
+      l2i,
+      denoise,
+      vaeSource,
+      modelLoader,
+      originalSize,
+      scaledSize,
+      bbox,
+      compositing,
+      refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
+      vaePrecision
+    );
   }
 
   const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
@@ -241,6 +237,11 @@ export const buildSDXLGraph = async (state: RootState, manager: KonvaNodeManager
     imageOutput = addWatermarker(g, imageOutput);
   }
 
+  // This is the terminal node and must always save to gallery.
+  imageOutput.is_intermediate = false;
+  imageOutput.use_cache = false;
+  imageOutput.board = getBoardField(state);
+
   g.setMetadataReceivingNode(imageOutput);
   return g.getGraph();
 };
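For SDXL, the denoising_start passed to the image-to-image, inpaint, and outpaint helpers is additionally clamped so denoising never begins after the refiner handoff. A worked sketch of the expression used throughout buildSDXLGraph (the helper name is illustrative):

    // strength 0.7, no refiner:        1 - 0.7 = 0.3
    // strength 0.7, refinerStart 0.8:  min(0.8, 0.3) = 0.3
    // strength 0.1, refinerStart 0.8:  min(0.8, 0.9) = 0.8
    const getSDXLDenoisingStart = (img2imgStrength: number, refinerStart?: number): number => {
      const base = 1 - img2imgStrength;
      return refinerStart === undefined ? base : Math.min(refinerStart, base);
    };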