diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts
index c6efd494e9..f1501b9533 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts
@@ -1,8 +1,7 @@
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
 import {
-  layerAdded,
-  layerImageAdded,
+  layerAddedFromStagingArea,
   sessionStagingAreaImageAccepted,
   sessionStagingAreaReset,
 } from 'features/controlLayers/store/canvasV2Slice';
@@ -49,33 +48,13 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
     actionCreator: sessionStagingAreaImageAccepted,
     effect: async (action, api) => {
       const { index } = action.payload;
-      const { layers, selectedEntityIdentifier } = api.getState().canvasV2;
-      let layer = layers.entities.find((layer) => layer.id === selectedEntityIdentifier?.id);
+      const state = api.getState();
+      const stagingAreaImage = state.canvasV2.session.stagedImages[index];
 
-      if (!layer) {
-        layer = layers.entities[0];
-      }
+      assert(stagingAreaImage, 'No staged image found to accept');
+      const { x, y } = state.canvasV2.bbox.rect;
 
-      if (!layer) {
-        // We need to create a new layer to add the accepted image
-        api.dispatch(layerAdded());
-        layer = api.getState().canvasV2.layers.entities[0];
-      }
-
-      const stagedImage = api.getState().canvasV2.session.stagedImages[index];
-
-      assert(stagedImage, 'No staged image found to accept');
-      assert(layer, 'No layer found to stage image');
-
-      const { id } = layer;
-
-      api.dispatch(
-        layerImageAdded({
-          id,
-          imageDTO: stagedImage.imageDTO,
-          pos: { x: stagedImage.rect.x - layer.x, y: stagedImage.rect.y - layer.y },
-        })
-      );
+      api.dispatch(layerAddedFromStagingArea({ stagingAreaImage, pos: { x, y } }));
       api.dispatch(sessionStagingAreaReset());
     },
   });
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts
index 0e3cffe22b..d8dcfb76b1 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts
@@ -59,12 +59,20 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
 
       imageDTORequest.unsubscribe();
 
       // handle tab-specific logic
-      if (data.origin === 'canvas' && data.result.type === 'canvas_v2_mask_and_crop_output') {
-        const { x, y, width, height } = data.result;
-        if (canvasV2.session.isStaging) {
-          dispatch(sessionImageStaged({ imageDTO, rect: { x, y, width, height } }));
-        } else if (!canvasV2.session.isActive) {
-          $lastProgressEvent.set(null);
+      if (data.origin === 'canvas' && data.invocation_source_id === 'canvas_output') {
+        if (data.result.type === 'canvas_v2_mask_and_crop_output') {
+          const { offset_x, offset_y } = data.result;
+          if (canvasV2.session.isStaging) {
+            dispatch(sessionImageStaged({ stagingAreaImage: { imageDTO, offsetX: offset_x, offsetY: offset_y } }));
+          } else if (!canvasV2.session.isActive) {
+            $lastProgressEvent.set(null);
+          }
+        } else if (data.result.type === 'image_output') {
+          if (canvasV2.session.isStaging) {
+            dispatch(sessionImageStaged({ stagingAreaImage: { imageDTO, offsetX: 0, offsetY: 0 } }));
+          } else if (!canvasV2.session.isActive) {
+            $lastProgressEvent.set(null);
+          }
         }
       }
diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingArea.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingArea.ts
index e6e5d72b13..4bee9409a5 100644
--- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingArea.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasStagingArea.ts
@@ -18,55 +18,47 @@ export class CanvasStagingArea {
 
   async render() {
     const session = this.manager.stateApi.getSession();
+    const bboxRect = this.manager.stateApi.getBbox().rect;
     const shouldShowStagedImage = this.manager.stateApi.getShouldShowStagedImage();
 
     this.selectedImage = session.stagedImages[session.selectedStagedImageIndex] ?? null;
 
     if (this.selectedImage) {
+      const { imageDTO, offsetX, offsetY } = this.selectedImage;
       if (this.image) {
-        if (
-          !this.image.isLoading &&
-          !this.image.isError &&
-          this.image.imageName !== this.selectedImage.imageDTO.image_name
-        ) {
-          await this.image.updateImageSource(this.selectedImage.imageDTO.image_name);
+        if (!this.image.isLoading && !this.image.isError && this.image.imageName !== imageDTO.image_name) {
+          this.image.konvaImageGroup.visible(false);
+          this.image.konvaImage?.width(imageDTO.width);
+          this.image.konvaImage?.height(imageDTO.height);
+          this.image.konvaImageGroup.x(bboxRect.x + offsetX);
+          this.image.konvaImageGroup.y(bboxRect.y + offsetY);
+          await this.image.updateImageSource(imageDTO.image_name);
         }
-        this.image.konvaImageGroup.x(this.selectedImage.rect.x);
-        this.image.konvaImageGroup.y(this.selectedImage.rect.y);
-        this.image.konvaImageGroup.visible(shouldShowStagedImage);
       } else {
-        const { image_name } = this.selectedImage.imageDTO;
-        const { x, y, width, height } = this.selectedImage.rect;
-        this.image = new CanvasImage(
-          {
-            id: 'staging-area-image',
-            type: 'image',
-            x,
-            y,
+        const { image_name, width, height } = imageDTO;
+        this.image = new CanvasImage({
+          id: 'staging-area-image',
+          type: 'image',
+          x: 0,
+          y: 0,
+          width,
+          height,
+          filters: [],
+          image: {
+            name: image_name,
             width,
             height,
-            filters: [],
-            image: {
-              name: image_name,
-              width,
-              height,
-            },
           },
-          {
-            onLoad: (konvaImage) => {
-              if (this.selectedImage) {
-                konvaImage.width(this.selectedImage.rect.width);
-                konvaImage.height(this.selectedImage.rect.height);
-              }
-              this.manager.stateApi.resetLastProgressEvent();
-              this.image?.konvaImageGroup.visible(shouldShowStagedImage);
-            },
-          }
-        );
+        });
         this.group.add(this.image.konvaImageGroup);
-        await this.image.updateImageSource(this.selectedImage.imageDTO.image_name);
-        this.image.konvaImageGroup.visible(shouldShowStagedImage);
+        this.image.konvaImage?.width(imageDTO.width);
+        this.image.konvaImage?.height(imageDTO.height);
+        this.image.konvaImageGroup.x(bboxRect.x + offsetX);
+        this.image.konvaImageGroup.y(bboxRect.y + offsetY);
+        await this.image.updateImageSource(imageDTO.image_name);
       }
+      this.manager.stateApi.resetLastProgressEvent();
+      this.image.konvaImageGroup.visible(shouldShowStagedImage);
     } else {
       this.image?.konvaImageGroup.visible(false);
     }
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasV2Slice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasV2Slice.ts
index b96fa09da0..f5c106457b 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/canvasV2Slice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasV2Slice.ts
@@ -15,7 +15,10 @@ import { regionsReducers } from 'features/controlLayers/store/regionsReducers';
 import { sessionReducers } from 'features/controlLayers/store/sessionReducers';
 import { settingsReducers } from 'features/controlLayers/store/settingsReducers';
 import { toolReducers } from 'features/controlLayers/store/toolReducers';
+import { getScaledBoundingBoxDimensions } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
 import { initialAspectRatioState } from 'features/parameters/components/DocumentSize/constants';
+import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
+import { pick } from 'lodash-es';
 import { atom } from 'nanostores';
 import type { InvocationDenoiseProgressEvent } from 'services/events/types';
 
@@ -158,6 +161,12 @@
     },
     canvasReset: (state) => {
       state.bbox = deepClone(initialState.bbox);
+      const optimalDimension = getOptimalDimension(state.params.model);
+      state.bbox.rect.width = optimalDimension;
+      state.bbox.rect.height = optimalDimension;
+      const size = pick(state.bbox.rect, 'width', 'height');
+      state.bbox.scaledSize = getScaledBoundingBoxDimensions(size, optimalDimension);
+
       state.controlAdapters = deepClone(initialState.controlAdapters);
       state.ipAdapters = deepClone(initialState.ipAdapters);
       state.layers = deepClone(initialState.layers);
@@ -195,6 +204,7 @@
   bboxSizeOptimized,
   // layers
   layerAdded,
+  layerAddedFromStagingArea,
   layerRecalled,
   layerDeleted,
   layerReset,
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/layersReducers.ts b/invokeai/frontend/web/src/features/controlLayers/store/layersReducers.ts
index d63f7e9c6b..d98740258e 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/layersReducers.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/layersReducers.ts
@@ -11,8 +11,10 @@
 import type {
   EraserLine,
   ImageObjectAddedArg,
   LayerEntity,
+  Position,
   RectShape,
   ScaleChangedArg,
+  StagingAreaImage,
 } from './types';
 import { imageDTOToImageObject, imageDTOToImageWithDims } from './types';
@@ -43,6 +45,32 @@
     },
     prepare: () => ({ payload: { id: uuidv4() } }),
   },
+  layerAddedFromStagingArea: {
+    reducer: (
+      state,
+      action: PayloadAction<{ id: string; objectId: string; stagingAreaImage: StagingAreaImage; pos: Position }>
+    ) => {
+      const { id, objectId, stagingAreaImage, pos } = action.payload;
+      const { imageDTO, offsetX, offsetY } = stagingAreaImage;
+      const imageObject = imageDTOToImageObject(id, objectId, imageDTO);
+      state.layers.entities.push({
+        id,
+        type: 'layer',
+        isEnabled: true,
+        bbox: null,
+        bboxNeedsUpdate: false,
+        objects: [imageObject],
+        opacity: 1,
+        x: pos.x + offsetX,
+        y: pos.y + offsetY,
+      });
+      state.selectedEntityIdentifier = { type: 'layer', id };
+      state.layers.imageCache = null;
+    },
+    prepare: (payload: { stagingAreaImage: StagingAreaImage; pos: Position }) => ({
+      payload: { ...payload, id: uuidv4(), objectId: uuidv4() },
+    }),
+  },
   layerRecalled: (state, action: PayloadAction<{ data: LayerEntity }>) => {
     const { data } = action.payload;
     state.layers.entities.push(data);
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsReducers.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsReducers.ts
index 548833587e..5e8a2b60ae 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/paramsReducers.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsReducers.ts
@@ -60,7 +60,6 @@ export const paramsReducers = {
     }
 
     // Update the bbox size to match the new model's optimal size
-    // TODO(psyche): Should we change the document size too?
     const optimalDimension = getOptimalDimension(model);
     if (!getIsSizeOptimal(state.bbox.rect.width, state.bbox.rect.height, optimalDimension)) {
       const bboxDims = calculateNewSize(state.bbox.aspectRatio.value, optimalDimension * optimalDimension);
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/sessionReducers.ts b/invokeai/frontend/web/src/features/controlLayers/store/sessionReducers.ts
index 8b6d240f09..03236aeaa2 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/sessionReducers.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/sessionReducers.ts
@@ -14,9 +14,9 @@ export const sessionReducers = {
     state.tool.selectedBuffer = state.tool.selected;
     state.tool.selected = 'view';
   },
-  sessionImageStaged: (state, action: PayloadAction) => {
-    const { imageDTO, rect } = action.payload;
-    state.session.stagedImages.push({ imageDTO, rect });
+  sessionImageStaged: (state, action: PayloadAction<{ stagingAreaImage: StagingAreaImage }>) => {
+    const { stagingAreaImage } = action.payload;
+    state.session.stagedImages.push(stagingAreaImage);
     state.session.selectedStagedImageIndex = state.session.stagedImages.length - 1;
   },
   sessionNextStagedImageSelected: (state) => {
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
index bafc43727c..f4972f2d1d 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
@@ -828,7 +828,8 @@ export type LoRA = {
 
 export type StagingAreaImage = {
   imageDTO: ImageDTO;
-  rect: Rect;
+  offsetX: number;
+  offsetY: number;
 };
 
 export type CanvasV2State = {
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
index ce001e44a5..ceeed6caa4 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
@@ -66,8 +66,7 @@ export const addInpaint = async (
     const canvasPasteBack = g.addNode({
       id: 'canvas_v2_mask_and_crop',
       type: 'canvas_v2_mask_and_crop',
-      invert: true,
-      crop_visible: true,
+      mask_blur: compositing.maskBlur,
     });
 
     // Resize initial image and mask to scaled size, feed into to gradient mask
@@ -113,8 +112,7 @@
     const canvasPasteBack = g.addNode({
       id: 'canvas_v2_mask_and_crop',
       type: 'canvas_v2_mask_and_crop',
-      invert: true,
-      crop_visible: true,
+      mask_blur: compositing.maskBlur,
    });
     g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
     g.addEdge(i2l, 'latents', denoise, 'latents');
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
index 5a3bb741f5..ec9a809a9f 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addNSFWChecker.ts
@@ -10,7 +10,7 @@ import type { Invocation } from 'services/api/types';
  */
 export const addNSFWChecker = (
   g: Graph,
-  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
+  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop'>
 ): Invocation<'img_nsfw'> => {
   const nsfw = g.addNode({
     id: NSFW_CHECKER,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
index 8d8caae6d6..fd0dd8b193 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
@@ -101,8 +101,7 @@ export const addOutpaint = async (
     const canvasPasteBack = g.addNode({
       id: 'canvas_v2_mask_and_crop',
       type: 'canvas_v2_mask_and_crop',
-      invert: true,
-      crop_visible: true,
+      mask_blur: compositing.maskBlur,
     });
 
     // Resize initial image and mask to scaled size, feed into to gradient mask
@@ -147,8 +146,7 @@
     const canvasPasteBack = g.addNode({
       id: 'canvas_v2_mask_and_crop',
       type: 'canvas_v2_mask_and_crop',
-      invert: true,
-      crop_visible: true,
+      mask_blur: compositing.maskBlur,
     });
     g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
index 9cd197a38c..b0f0f14008 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addWatermarker.ts
@@ -10,7 +10,7 @@ import type { Invocation } from 'services/api/types';
  */
 export const addWatermarker = (
   g: Graph,
-  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
+  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop'>
 ): Invocation<'img_watermark'> => {
   const watermark = g.addNode({
     id: WATERMARKER,