mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
feat(ui): update staging handling to work w/ cropped mask
This commit is contained in:
parent 5087b306c0
commit ef4d6c26f6
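
Summary: staged canvas results no longer carry an absolute rect. Each staged image now stores bbox-relative offsets (taken from the cropped canvas_v2_mask_and_crop output), and its on-canvas position is derived from the generation bbox at render and accept time. Below is a minimal TypeScript sketch of that position math; the Rect and StagedSketch types are simplified stand-ins for illustration, not the app's real modules.

// Sketch only; Rect and StagedSketch are simplified stand-ins for the real types.
type Rect = { x: number; y: number; width: number; height: number };
type StagedSketch = { offsetX: number; offsetY: number; width: number; height: number };

// Mirrors the render logic in this commit, e.g. konvaImageGroup.x(bboxRect.x + offsetX).
const positionStagedImage = (bboxRect: Rect, staged: StagedSketch): Rect => ({
  x: bboxRect.x + staged.offsetX,
  y: bboxRect.y + staged.offsetY,
  width: staged.width,
  height: staged.height,
});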
@@ -1,8 +1,7 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import {
  layerAdded,
  layerImageAdded,
  layerAddedFromStagingArea,
  sessionStagingAreaImageAccepted,
  sessionStagingAreaReset,
} from 'features/controlLayers/store/canvasV2Slice';
@@ -49,33 +48,13 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
    actionCreator: sessionStagingAreaImageAccepted,
    effect: async (action, api) => {
      const { index } = action.payload;
      const { layers, selectedEntityIdentifier } = api.getState().canvasV2;
      let layer = layers.entities.find((layer) => layer.id === selectedEntityIdentifier?.id);
      const state = api.getState();
      const stagingAreaImage = state.canvasV2.session.stagedImages[index];

      if (!layer) {
        layer = layers.entities[0];
      }
      assert(stagingAreaImage, 'No staged image found to accept');
      const { x, y } = state.canvasV2.bbox.rect;

      if (!layer) {
        // We need to create a new layer to add the accepted image
        api.dispatch(layerAdded());
        layer = api.getState().canvasV2.layers.entities[0];
      }

      const stagedImage = api.getState().canvasV2.session.stagedImages[index];

      assert(stagedImage, 'No staged image found to accept');
      assert(layer, 'No layer found to stage image');

      const { id } = layer;

      api.dispatch(
        layerImageAdded({
          id,
          imageDTO: stagedImage.imageDTO,
          pos: { x: stagedImage.rect.x - layer.x, y: stagedImage.rect.y - layer.y },
        })
      );
      api.dispatch(layerAddedFromStagingArea({ stagingAreaImage, pos: { x, y } }));
      api.dispatch(sessionStagingAreaReset());
    },
  });
@@ -59,12 +59,20 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
      imageDTORequest.unsubscribe();

      // handle tab-specific logic
      if (data.origin === 'canvas' && data.result.type === 'canvas_v2_mask_and_crop_output') {
        const { x, y, width, height } = data.result;
        if (canvasV2.session.isStaging) {
          dispatch(sessionImageStaged({ imageDTO, rect: { x, y, width, height } }));
        } else if (!canvasV2.session.isActive) {
          $lastProgressEvent.set(null);
      if (data.origin === 'canvas' && data.invocation_source_id === 'canvas_output') {
        if (data.result.type === 'canvas_v2_mask_and_crop_output') {
          const { offset_x, offset_y } = data.result;
          if (canvasV2.session.isStaging) {
            dispatch(sessionImageStaged({ stagingAreaImage: { imageDTO, offsetX: offset_x, offsetY: offset_y } }));
          } else if (!canvasV2.session.isActive) {
            $lastProgressEvent.set(null);
          }
        } else if (data.result.type === 'image_output') {
          if (canvasV2.session.isStaging) {
            dispatch(sessionImageStaged({ stagingAreaImage: { imageDTO, offsetX: 0, offsetY: 0 } }));
          } else if (!canvasV2.session.isActive) {
            $lastProgressEvent.set(null);
          }
        }
      }
@@ -18,55 +18,47 @@ export class CanvasStagingArea {

  async render() {
    const session = this.manager.stateApi.getSession();
    const bboxRect = this.manager.stateApi.getBbox().rect;
    const shouldShowStagedImage = this.manager.stateApi.getShouldShowStagedImage();

    this.selectedImage = session.stagedImages[session.selectedStagedImageIndex] ?? null;

    if (this.selectedImage) {
      const { imageDTO, offsetX, offsetY } = this.selectedImage;
      if (this.image) {
        if (
          !this.image.isLoading &&
          !this.image.isError &&
          this.image.imageName !== this.selectedImage.imageDTO.image_name
        ) {
          await this.image.updateImageSource(this.selectedImage.imageDTO.image_name);
        if (!this.image.isLoading && !this.image.isError && this.image.imageName !== imageDTO.image_name) {
          this.image.konvaImageGroup.visible(false);
          this.image.konvaImage?.width(imageDTO.width);
          this.image.konvaImage?.height(imageDTO.height);
          this.image.konvaImageGroup.x(bboxRect.x + offsetX);
          this.image.konvaImageGroup.y(bboxRect.y + offsetY);
          await this.image.updateImageSource(imageDTO.image_name);
        }
        this.image.konvaImageGroup.x(this.selectedImage.rect.x);
        this.image.konvaImageGroup.y(this.selectedImage.rect.y);
        this.image.konvaImageGroup.visible(shouldShowStagedImage);
      } else {
        const { image_name } = this.selectedImage.imageDTO;
        const { x, y, width, height } = this.selectedImage.rect;
        this.image = new CanvasImage(
          {
            id: 'staging-area-image',
            type: 'image',
            x,
            y,
        const { image_name, width, height } = imageDTO;
        this.image = new CanvasImage({
          id: 'staging-area-image',
          type: 'image',
          x: 0,
          y: 0,
          width,
          height,
          filters: [],
          image: {
            name: image_name,
            width,
            height,
            filters: [],
            image: {
              name: image_name,
              width,
              height,
            },
          },
          {
            onLoad: (konvaImage) => {
              if (this.selectedImage) {
                konvaImage.width(this.selectedImage.rect.width);
                konvaImage.height(this.selectedImage.rect.height);
              }
              this.manager.stateApi.resetLastProgressEvent();
              this.image?.konvaImageGroup.visible(shouldShowStagedImage);
            },
          }
        );
        });
        this.group.add(this.image.konvaImageGroup);
        await this.image.updateImageSource(this.selectedImage.imageDTO.image_name);
        this.image.konvaImageGroup.visible(shouldShowStagedImage);
        this.image.konvaImage?.width(imageDTO.width);
        this.image.konvaImage?.height(imageDTO.height);
        this.image.konvaImageGroup.x(bboxRect.x + offsetX);
        this.image.konvaImageGroup.y(bboxRect.y + offsetY);
        await this.image.updateImageSource(imageDTO.image_name);
      }
      this.manager.stateApi.resetLastProgressEvent();
      this.image.konvaImageGroup.visible(shouldShowStagedImage);
    } else {
      this.image?.konvaImageGroup.visible(false);
    }
@@ -15,7 +15,10 @@ import { regionsReducers } from 'features/controlLayers/store/regionsReducers';
import { sessionReducers } from 'features/controlLayers/store/sessionReducers';
import { settingsReducers } from 'features/controlLayers/store/settingsReducers';
import { toolReducers } from 'features/controlLayers/store/toolReducers';
import { getScaledBoundingBoxDimensions } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import { initialAspectRatioState } from 'features/parameters/components/DocumentSize/constants';
import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
import { pick } from 'lodash-es';
import { atom } from 'nanostores';
import type { InvocationDenoiseProgressEvent } from 'services/events/types';
@@ -158,6 +161,12 @@ export const canvasV2Slice = createSlice({
    },
    canvasReset: (state) => {
      state.bbox = deepClone(initialState.bbox);
      const optimalDimension = getOptimalDimension(state.params.model);
      state.bbox.rect.width = optimalDimension;
      state.bbox.rect.height = optimalDimension;
      const size = pick(state.bbox.rect, 'width', 'height');
      state.bbox.scaledSize = getScaledBoundingBoxDimensions(size, optimalDimension);

      state.controlAdapters = deepClone(initialState.controlAdapters);
      state.ipAdapters = deepClone(initialState.ipAdapters);
      state.layers = deepClone(initialState.layers);
@@ -195,6 +204,7 @@ export const {
  bboxSizeOptimized,
  // layers
  layerAdded,
  layerAddedFromStagingArea,
  layerRecalled,
  layerDeleted,
  layerReset,
@@ -11,8 +11,10 @@ import type {
  EraserLine,
  ImageObjectAddedArg,
  LayerEntity,
  Position,
  RectShape,
  ScaleChangedArg,
  StagingAreaImage,
} from './types';
import { imageDTOToImageObject, imageDTOToImageWithDims } from './types';
@@ -43,6 +45,32 @@ export const layersReducers = {
    },
    prepare: () => ({ payload: { id: uuidv4() } }),
  },
  layerAddedFromStagingArea: {
    reducer: (
      state,
      action: PayloadAction<{ id: string; objectId: string; stagingAreaImage: StagingAreaImage; pos: Position }>
    ) => {
      const { id, objectId, stagingAreaImage, pos } = action.payload;
      const { imageDTO, offsetX, offsetY } = stagingAreaImage;
      const imageObject = imageDTOToImageObject(id, objectId, imageDTO);
      state.layers.entities.push({
        id,
        type: 'layer',
        isEnabled: true,
        bbox: null,
        bboxNeedsUpdate: false,
        objects: [imageObject],
        opacity: 1,
        x: pos.x + offsetX,
        y: pos.y + offsetY,
      });
      state.selectedEntityIdentifier = { type: 'layer', id };
      state.layers.imageCache = null;
    },
    prepare: (payload: { stagingAreaImage: StagingAreaImage; pos: Position }) => ({
      payload: { ...payload, id: uuidv4(), objectId: uuidv4() },
    }),
  },
  layerRecalled: (state, action: PayloadAction<{ data: LayerEntity }>) => {
    const { data } = action.payload;
    state.layers.entities.push(data);
@@ -60,7 +60,6 @@ export const paramsReducers = {
    }

    // Update the bbox size to match the new model's optimal size
    // TODO(psyche): Should we change the document size too?
    const optimalDimension = getOptimalDimension(model);
    if (!getIsSizeOptimal(state.bbox.rect.width, state.bbox.rect.height, optimalDimension)) {
      const bboxDims = calculateNewSize(state.bbox.aspectRatio.value, optimalDimension * optimalDimension);
@@ -14,9 +14,9 @@ export const sessionReducers = {
    state.tool.selectedBuffer = state.tool.selected;
    state.tool.selected = 'view';
  },
  sessionImageStaged: (state, action: PayloadAction<StagingAreaImage>) => {
    const { imageDTO, rect } = action.payload;
    state.session.stagedImages.push({ imageDTO, rect });
  sessionImageStaged: (state, action: PayloadAction<{ stagingAreaImage: StagingAreaImage }>) => {
    const { stagingAreaImage } = action.payload;
    state.session.stagedImages.push(stagingAreaImage);
    state.session.selectedStagedImageIndex = state.session.stagedImages.length - 1;
  },
  sessionNextStagedImageSelected: (state) => {
@@ -828,7 +828,8 @@ export type LoRA = {

export type StagingAreaImage = {
  imageDTO: ImageDTO;
  rect: Rect;
  offsetX: number;
  offsetY: number;
};

export type CanvasV2State = {
@@ -66,8 +66,7 @@ export const addInpaint = async (
  const canvasPasteBack = g.addNode({
    id: 'canvas_v2_mask_and_crop',
    type: 'canvas_v2_mask_and_crop',
    invert: true,
    crop_visible: true,
    mask_blur: compositing.maskBlur,
  });

  // Resize initial image and mask to scaled size, feed into to gradient mask
@@ -113,8 +112,7 @@ export const addInpaint = async (
  const canvasPasteBack = g.addNode({
    id: 'canvas_v2_mask_and_crop',
    type: 'canvas_v2_mask_and_crop',
    invert: true,
    crop_visible: true,
    mask_blur: compositing.maskBlur,
  });
  g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
  g.addEdge(i2l, 'latents', denoise, 'latents');
@@ -10,7 +10,7 @@ import type { Invocation } from 'services/api/types';
 */
export const addNSFWChecker = (
  g: Graph,
  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop'>
): Invocation<'img_nsfw'> => {
  const nsfw = g.addNode({
    id: NSFW_CHECKER,
@@ -101,8 +101,7 @@ export const addOutpaint = async (
  const canvasPasteBack = g.addNode({
    id: 'canvas_v2_mask_and_crop',
    type: 'canvas_v2_mask_and_crop',
    invert: true,
    crop_visible: true,
    mask_blur: compositing.maskBlur,
  });

  // Resize initial image and mask to scaled size, feed into to gradient mask
@@ -147,8 +146,7 @@ export const addOutpaint = async (
  const canvasPasteBack = g.addNode({
    id: 'canvas_v2_mask_and_crop',
    type: 'canvas_v2_mask_and_crop',
    invert: true,
    crop_visible: true,
    mask_blur: compositing.maskBlur,
  });
  g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
  g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
@@ -10,7 +10,7 @@ import type { Invocation } from 'services/api/types';
 */
export const addWatermarker = (
  g: Graph,
  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_paste_back'>
  imageOutput: Invocation<'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop'>
): Invocation<'img_watermark'> => {
  const watermark = g.addNode({
    id: WATERMARKER,