diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts index c06ffa5ca6..a57036d366 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts @@ -16,7 +16,7 @@ export const addImageToImage = async ( scaledSize: Dimensions, bbox: CanvasV2State['bbox'], strength: ParameterStrength -) => { +): Promise<Invocation<'img_resize' | 'l2i'>> => { denoise.denoising_start = 1 - strength; const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']); @@ -46,11 +46,12 @@ export const addImageToImage = async ( g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image'); // This is the new output node - imageOutput = resizeImageToOriginalSize; + return resizeImageToOriginalSize; } else { // No need to resize, just denoise const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } }); g.addEdge(vaeSource, 'vae', i2l, 'vae'); g.addEdge(i2l, 'latents', denoise, 'latents'); + return l2i; } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts index 9d946d2002..48cf324901 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts @@ -12,14 +12,13 @@ export const addInpaint = async ( denoise: Invocation<'denoise_latents'>, vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>, modelLoader: Invocation<'main_model_loader'>, - imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>, originalSize: Dimensions, scaledSize: Dimensions, bbox: CanvasV2State['bbox'], compositing: CanvasV2State['compositing'], strength: ParameterStrength, vaePrecision: 
ParameterPrecision -) => { +): Promise<Invocation<'canvas_paste_back'>> => { denoise.denoising_start = 1 - strength; const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']); @@ -98,7 +97,7 @@ export const addInpaint = async ( g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image'); g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask'); - imageOutput = canvasPasteBack; + return canvasPasteBack; } else { // No scale before processing, much simpler const i2l = g.addNode({ id: 'i2l', type: 'i2l', image: { image_name: initialImage.image_name } }); @@ -132,6 +131,6 @@ export const addInpaint = async ( g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask'); g.addEdge(l2i, 'image', canvasPasteBack, 'target_image'); - imageOutput = canvasPasteBack; + return canvasPasteBack; } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts index 440eeda934..dad18653c1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts @@ -13,14 +13,13 @@ export const addOutpaint = async ( denoise: Invocation<'denoise_latents'>, vaeSource: Invocation<'main_model_loader' | 'seamless' | 'vae_loader'>, modelLoader: Invocation<'main_model_loader'>, - imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>, originalSize: Dimensions, scaledSize: Dimensions, bbox: CanvasV2State['bbox'], compositing: CanvasV2State['compositing'], strength: ParameterStrength, vaePrecision: ParameterPrecision -) => { +): Promise<Invocation<'canvas_paste_back'>> => { denoise.denoising_start = 1 - strength; const cropBbox = pick(bbox, ['x', 'y', 'width', 'height']); @@ -36,24 +35,65 @@ export const addOutpaint = async ( if (!isEqual(scaledSize, originalSize)) { // Scale before processing requires some resizing - const i2l = g.addNode({ id: 'i2l', type: 
'i2l' }); + + // Combine the inpaint mask and the initial image's alpha channel into a single mask + const maskAlphaToMask = g.addNode({ + id: 'alpha_to_mask', + type: 'tomask', + image: { image_name: maskImage.image_name }, + invert: true, + }); + const initialImageAlphaToMask = g.addNode({ + id: 'image_alpha_to_mask', + type: 'tomask', + image: { image_name: initialImage.image_name }, + }); + const maskCombine = g.addNode({ + id: 'mask_combine', + type: 'mask_combine', + }); + g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1'); + g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2'); + + // Resize the combined and initial image to the scaled size + const resizeMaskToScaledSize = g.addNode({ + id: 'resize_mask_to_scaled_size', + type: 'img_resize', + ...scaledSize, + }); + g.addEdge(maskCombine, 'image', resizeMaskToScaledSize, 'image'); + + // Resize the initial image to the scaled size and infill const resizeImageToScaledSize = g.addNode({ id: 'resize_image_to_scaled_size', type: 'img_resize', image: { image_name: initialImage.image_name }, ...scaledSize, }); - const alphaToMask = g.addNode({ - id: 'alpha_to_mask', - type: 'tomask', - image: { image_name: maskImage.image_name }, - invert: true, - }); - const resizeMaskToScaledSize = g.addNode({ - id: 'resize_mask_to_scaled_size', - type: 'img_resize', - ...scaledSize, + g.addEdge(resizeImageToScaledSize, 'image', infill, 'image'); + + // Create the gradient denoising mask from the combined mask + const createGradientMask = g.addNode({ + id: 'create_gradient_mask', + type: 'create_gradient_mask', + coherence_mode: compositing.canvasCoherenceMode, + minimum_denoise: compositing.canvasCoherenceMinDenoise, + edge_radius: compositing.canvasCoherenceEdgeSize, + fp32: vaePrecision === 'fp32', }); + g.addEdge(infill, 'image', createGradientMask, 'image'); + g.addEdge(maskCombine, 'image', createGradientMask, 'mask'); + g.addEdge(vaeSource, 'vae', createGradientMask, 'vae'); + g.addEdge(modelLoader, 
'unet', createGradientMask, 'unet'); + g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask'); + + // Decode infilled image and connect to denoise + const i2l = g.addNode({ id: 'i2l', type: 'i2l' }); + g.addEdge(infill, 'image', i2l, 'image'); + g.addEdge(vaeSource, 'vae', i2l, 'vae'); + g.addEdge(i2l, 'latents', denoise, 'latents'); + + // Resize the output image back to the original size const resizeImageToOriginalSize = g.addNode({ id: 'resize_image_to_original_size', type: 'img_resize', @@ -64,14 +104,6 @@ export const addOutpaint = async ( type: 'img_resize', ...originalSize, }); - const createGradientMask = g.addNode({ - id: 'create_gradient_mask', - type: 'create_gradient_mask', - coherence_mode: compositing.canvasCoherenceMode, - minimum_denoise: compositing.canvasCoherenceMinDenoise, - edge_radius: compositing.canvasCoherenceEdgeSize, - fp32: vaePrecision === 'fp32', - }); const canvasPasteBack = g.addNode({ id: 'canvas_paste_back', type: 'canvas_paste_back', @@ -80,17 +112,6 @@ export const addOutpaint = async ( }); // Resize initial image and mask to scaled size, feed into to gradient mask - g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image'); - g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image'); - g.addEdge(i2l, 'latents', denoise, 'latents'); - g.addEdge(vaeSource, 'vae', i2l, 'vae'); - - g.addEdge(vaeSource, 'vae', createGradientMask, 'vae'); - g.addEdge(modelLoader, 'unet', createGradientMask, 'unet'); - g.addEdge(resizeImageToScaledSize, 'image', createGradientMask, 'image'); - g.addEdge(resizeMaskToScaledSize, 'image', createGradientMask, 'mask'); - - g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask'); // After denoising, resize the image and mask back to original size g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image'); @@ -100,7 +121,7 @@ export const addOutpaint = async ( g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'target_image'); 
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask'); - imageOutput = canvasPasteBack; + return canvasPasteBack; } else { infill.image = { image_name: initialImage.image_name }; // No scale before processing, much simpler @@ -147,6 +168,6 @@ export const addOutpaint = async ( g.addEdge(infill, 'image', canvasPasteBack, 'source_image'); g.addEdge(l2i, 'image', canvasPasteBack, 'target_image'); - imageOutput = canvasPasteBack; + return canvasPasteBack; } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts index 00792efe5c..bc11f76be2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts @@ -6,10 +6,9 @@ import type { Invocation } from 'services/api/types'; export const addTextToImage = ( g: Graph, l2i: Invocation<'l2i'>, - imageOutput: Invocation<'canvas_paste_back' | 'img_nsfw' | 'img_resize' | 'img_watermark' | 'l2i'>, originalSize: Dimensions, scaledSize: Dimensions -) => { +): Invocation<'img_resize' | 'l2i'> => { if (!isEqual(scaledSize, originalSize)) { // We need to resize the output image back to the original size const resizeImageToOriginalSize = g.addNode({ @@ -19,7 +18,8 @@ export const addTextToImage = ( }); g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image'); - // This is the new output node - imageOutput = resizeImageToOriginalSize; + return resizeImageToOriginalSize; + } else { + return l2i; } }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts index 428b8d72d1..410ce19cdc 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts @@ 
-109,7 +109,6 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager) type: 'l2i', id: LATENTS_TO_IMAGE, fp32: vaePrecision === 'fp32', - board: getBoardField(state), }); const vaeLoader = vae?.base === model.base @@ -162,9 +161,9 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager) g.addEdge(vaeSource, 'vae', l2i, 'vae'); if (generationMode === 'txt2img') { - addTextToImage(g, l2i, imageOutput, originalSize, scaledSize); + imageOutput = addTextToImage(g, l2i, originalSize, scaledSize); } else if (generationMode === 'img2img') { - addImageToImage( + imageOutput = await addImageToImage( g, manager, l2i, @@ -178,14 +177,13 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager) ); } else if (generationMode === 'inpaint') { const { compositing } = state.canvasV2; - addInpaint( + imageOutput = await addInpaint( g, manager, l2i, denoise, vaeSource, modelLoader, - imageOutput, originalSize, scaledSize, bbox, @@ -195,14 +193,13 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager) ); } else if (generationMode === 'outpaint') { const { compositing } = state.canvasV2; - addOutpaint( + imageOutput = await addOutpaint( g, manager, l2i, denoise, vaeSource, modelLoader, - imageOutput, originalSize, scaledSize, bbox, @@ -244,6 +241,7 @@ export const buildSD1Graph = async (state: RootState, manager: KonvaNodeManager) // This is the terminal node and must always save to gallery. imageOutput.is_intermediate = false; imageOutput.use_cache = false; + imageOutput.board = getBoardField(state); g.setMetadataReceivingNode(imageOutput); return g.getGraph();