feat(ui): restore save/copy/download/merge functionality

This commit is contained in:
psychedelicious
2023-05-15 22:21:03 +10:00
parent d95fe5925a
commit d2c9140e69
25 changed files with 519 additions and 375 deletions

View File

@ -0,0 +1,9 @@
/**
 * Reads a Blob and resolves with its contents encoded as a data URL.
 * Rejects with the reader's error on failure, or a generic Error if the
 * read is aborted.
 */
export const blobToDataURL = (blob: Blob): Promise<string> =>
  new Promise<string>((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => {
      // readAsDataURL always produces a string result on success
      resolve(reader.result as string);
    };
    reader.onerror = () => {
      reject(reader.error);
    };
    reader.onabort = () => {
      reject(new Error('Read aborted'));
    };
    reader.readAsDataURL(blob);
  });

View File

@ -0,0 +1,10 @@
/**
 * Copies a blob to the clipboard by calling navigator.clipboard.write().
 *
 * @param blob - the blob to copy; its MIME type is used as the ClipboardItem type
 * @returns a promise that resolves when the write completes. Returning the
 *   promise (instead of letting it float) lets callers await the copy and
 *   handle failures — e.g. permission denied or an unsupported MIME type —
 *   which were previously silently dropped as unhandled rejections.
 */
export const copyBlobToClipboard = (blob: Blob): Promise<void> => {
  return navigator.clipboard.write([
    new ClipboardItem({
      [blob.type]: blob,
    }),
  ]);
};

View File

@ -0,0 +1,61 @@
import { CanvasMaskLine } from 'features/canvas/store/canvasTypes';
import Konva from 'konva';
import { IRect } from 'konva/lib/types';
/**
 * Creates a stage from array of mask objects.
 * We cannot just convert the mask layer to a blob because it uses a texture with transparent areas.
 * So instead we create a new stage with the mask layer and composite it onto a white background.
 */
const createMaskStage = async (
  lines: CanvasMaskLine[],
  boundingBox: IRect
): Promise<Konva.Stage> => {
  const { width, height } = boundingBox;

  // Konva requires a container element, but it never needs to be attached
  // to the document — an offscreen div is enough.
  const container = document.createElement('div');
  const stage = new Konva.Stage({ container, width, height });

  // Opaque white background so the exported image has no transparency.
  const backgroundLayer = new Konva.Layer();
  backgroundLayer.add(new Konva.Rect({ ...boundingBox, fill: 'white' }));

  // Draw each mask stroke in black; erasers punch holes via destination-out.
  const strokesLayer = new Konva.Layer();
  for (const line of lines) {
    strokesLayer.add(
      new Konva.Line({
        points: line.points,
        stroke: 'black',
        strokeWidth: line.strokeWidth * 2,
        tension: 0,
        lineCap: 'round',
        lineJoin: 'round',
        shadowForStrokeEnabled: false,
        globalCompositeOperation:
          line.tool === 'brush' ? 'source-over' : 'destination-out',
      })
    );
  }

  stage.add(backgroundLayer);
  stage.add(strokesLayer);

  // you'd think we can't do this until we finish with the maskStage, but we can
  container.remove();

  return stage;
};
export default createMaskStage;

View File

@ -0,0 +1,11 @@
/**
 * Downloads a blob as a file by creating a temporary object URL and
 * programmatically clicking a hidden anchor.
 *
 * @param blob - the data to download
 * @param fileName - the suggested file name for the download
 */
export const downloadBlob = (blob: Blob, fileName: string) => {
  const url = URL.createObjectURL(blob);
  const a = document.createElement('a');
  a.href = url;
  a.download = fileName;
  // The anchor must be in the DOM for click() to trigger a download in
  // some browsers.
  document.body.appendChild(a);
  a.click();
  // a.remove() detaches the anchor (the previous removeChild + remove()
  // pair was redundant — remove() alone suffices).
  a.remove();
  // Release the object URL; without this the blob is retained for the
  // lifetime of the page (memory leak).
  URL.revokeObjectURL(url);
};

View File

@ -1,170 +0,0 @@
// import { CanvasMaskLine } from 'features/canvas/store/canvasTypes';
// import Konva from 'konva';
// import { Stage } from 'konva/lib/Stage';
// import { IRect } from 'konva/lib/types';
// /**
// * Generating a mask image from InpaintingCanvas.tsx is not as simple
// * as calling toDataURL() on the canvas, because the mask may be represented
// * by colored lines or transparency, or the user may have inverted the mask
// * display.
// *
// * So we need to regenerate the mask image by creating an offscreen canvas,
// * drawing the mask and compositing everything correctly to output a valid
// * mask image.
// */
// export const getStageDataURL = (stage: Stage, boundingBox: IRect): string => {
// // create an offscreen canvas and add the mask to it
// // const { stage, offscreenContainer } = buildMaskStage(lines, boundingBox);
// const dataURL = stage.toDataURL({ ...boundingBox });
// // const imageData = stage
// // .toCanvas()
// // .getContext('2d')
// // ?.getImageData(
// // boundingBox.x,
// // boundingBox.y,
// // boundingBox.width,
// // boundingBox.height
// // );
// // offscreenContainer.remove();
// // return { dataURL, imageData };
// return dataURL;
// };
// export const getStageImageData = (
// stage: Stage,
// boundingBox: IRect
// ): ImageData | undefined => {
// const imageData = stage
// .toCanvas()
// .getContext('2d')
// ?.getImageData(
// boundingBox.x,
// boundingBox.y,
// boundingBox.width,
// boundingBox.height
// );
// return imageData;
// };
// export const buildMaskStage = (
// lines: CanvasMaskLine[],
// boundingBox: IRect
// ): { stage: Stage; offscreenContainer: HTMLDivElement } => {
// // create an offscreen canvas and add the mask to it
// const { width, height } = boundingBox;
// const offscreenContainer = document.createElement('div');
// const stage = new Konva.Stage({
// container: offscreenContainer,
// width: width,
// height: height,
// });
// const baseLayer = new Konva.Layer();
// const maskLayer = new Konva.Layer();
// // composite the image onto the mask layer
// baseLayer.add(
// new Konva.Rect({
// ...boundingBox,
// fill: 'white',
// })
// );
// lines.forEach((line) =>
// maskLayer.add(
// new Konva.Line({
// points: line.points,
// stroke: 'black',
// strokeWidth: line.strokeWidth * 2,
// tension: 0,
// lineCap: 'round',
// lineJoin: 'round',
// shadowForStrokeEnabled: false,
// globalCompositeOperation:
// line.tool === 'brush' ? 'source-over' : 'destination-out',
// })
// )
// );
// stage.add(baseLayer);
// stage.add(maskLayer);
// return { stage, offscreenContainer };
// };
import { CanvasMaskLine } from 'features/canvas/store/canvasTypes';
import Konva from 'konva';
import { IRect } from 'konva/lib/types';
import { canvasToBlob } from './canvasToBlob';
/**
 * Generating a mask image from InpaintingCanvas.tsx is not as simple
 * as calling toDataURL() on the canvas, because the mask may be represented
 * by colored lines or transparency, or the user may have inverted the mask
 * display.
 *
 * So we need to regenerate the mask image by creating an offscreen canvas,
 * drawing the mask and compositing everything correctly to output a valid
 * mask image.
 *
 * @param lines - mask strokes (brush/eraser) to render
 * @param boundingBox - region (x/y/width/height) to render and crop to
 * @returns an object with `maskDataURL` (base64 data URL) and `maskBlob`
 *   (Blob of the same region, produced via canvasToBlob)
 */
const generateMask = async (lines: CanvasMaskLine[], boundingBox: IRect) => {
// create an offscreen canvas and add the mask to it
const { width, height } = boundingBox;
// Konva needs a container element; a detached div works fine offscreen.
const offscreenContainer = document.createElement('div');
const stage = new Konva.Stage({
container: offscreenContainer,
width: width,
height: height,
});
const baseLayer = new Konva.Layer();
const maskLayer = new Konva.Layer();
// composite the image onto the mask layer
// White base rect so the output has no transparent pixels.
baseLayer.add(
new Konva.Rect({
...boundingBox,
fill: 'white',
})
);
// Draw each stroke in black; eraser strokes cut out via destination-out.
lines.forEach((line) =>
maskLayer.add(
new Konva.Line({
points: line.points,
stroke: 'black',
strokeWidth: line.strokeWidth * 2,
tension: 0,
lineCap: 'round',
lineJoin: 'round',
shadowForStrokeEnabled: false,
globalCompositeOperation:
line.tool === 'brush' ? 'source-over' : 'destination-out',
})
)
);
stage.add(baseLayer);
stage.add(maskLayer);
// Export both representations of the rendered mask region.
const maskDataURL = stage.toDataURL(boundingBox);
const maskBlob = await canvasToBlob(stage.toCanvas(boundingBox));
// Clean up the detached container now that rendering is done.
offscreenContainer.remove();
return { maskDataURL, maskBlob };
};
export default generateMask;

View File

@ -0,0 +1,38 @@
import { getCanvasBaseLayer } from './konvaInstanceProvider';
import { RootState } from 'app/store/store';
import { konvaNodeToBlob } from './konvaNodeToBlob';
export const getBaseLayerBlob = async (
state: RootState,
withoutBoundingBox?: boolean
) => {
const canvasBaseLayer = getCanvasBaseLayer();
if (!canvasBaseLayer) {
return;
}
const {
shouldCropToBoundingBoxOnSave,
boundingBoxCoordinates,
boundingBoxDimensions,
} = state.canvas;
const clonedBaseLayer = canvasBaseLayer.clone();
clonedBaseLayer.scale({ x: 1, y: 1 });
const absPos = clonedBaseLayer.getAbsolutePosition();
const boundingBox =
shouldCropToBoundingBoxOnSave && !withoutBoundingBox
? {
x: boundingBoxCoordinates.x + absPos.x,
y: boundingBoxCoordinates.y + absPos.y,
width: boundingBoxDimensions.width,
height: boundingBoxDimensions.height,
}
: clonedBaseLayer.getClientRect();
return konvaNodeToBlob(clonedBaseLayer, boundingBox);
};

View File

@ -2,17 +2,15 @@ import { RootState } from 'app/store/store';
import { getCanvasBaseLayer, getCanvasStage } from './konvaInstanceProvider';
import { isCanvasMaskLine } from '../store/canvasTypes';
import { log } from 'app/logging/useLogger';
import {
areAnyPixelsBlack,
getImageDataTransparency,
} from 'common/util/arrayBuffer';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import generateMask from './generateMask';
import { dataURLToImageData } from './dataURLToImageData';
import { canvasToBlob } from './canvasToBlob';
import createMaskStage from './createMaskStage';
import { konvaNodeToImageData } from './konvaNodeToImageData';
import { konvaNodeToBlob } from './konvaNodeToBlob';
const moduleLog = log.child({ namespace: 'getCanvasDataURLs' });
/**
* Gets Blob and ImageData objects for the base and mask layers
*/
export const getCanvasData = async (state: RootState) => {
const canvasBaseLayer = getCanvasBaseLayer();
const canvasStage = getCanvasStage();
@ -27,10 +25,6 @@ export const getCanvasData = async (state: RootState) => {
boundingBoxCoordinates,
boundingBoxDimensions,
isMaskEnabled,
shouldPreserveMaskedArea,
boundingBoxScaleMethod: boundingBoxScale,
scaledBoundingBoxDimensions,
stageCoordinates,
} = state.canvas;
const boundingBox = {
@ -38,18 +32,10 @@ export const getCanvasData = async (state: RootState) => {
...boundingBoxDimensions,
};
// generationParameters.fit = false;
// generationParameters.strength = img2imgStrength;
// generationParameters.invert_mask = shouldPreserveMaskedArea;
// generationParameters.bounding_box = boundingBox;
// clone the base layer so we don't affect the actual canvas during scaling
// Clone the base layer so we don't affect the visible base layer
const clonedBaseLayer = canvasBaseLayer.clone();
// scale to 1 so we get an uninterpolated image
// Scale it to 100% so we get full resolution
clonedBaseLayer.scale({ x: 1, y: 1 });
// absolute position is needed to get the bounding box coords relative to the base layer
@ -62,73 +48,25 @@ export const getCanvasData = async (state: RootState) => {
height: boundingBox.height,
};
// get a dataURL of the bbox'd region (will convert this to an ImageData to check its transparency)
const baseDataURL = clonedBaseLayer.toDataURL(offsetBoundingBox);
// get a blob (will upload this as the canvas intermediate)
const baseBlob = await canvasToBlob(
clonedBaseLayer.toCanvas(offsetBoundingBox)
// For the base layer, use the offset boundingBox
const baseBlob = await konvaNodeToBlob(clonedBaseLayer, offsetBoundingBox);
const baseImageData = await konvaNodeToImageData(
clonedBaseLayer,
offsetBoundingBox
);
// build a new mask layer and get its dataURL and blob
const { maskDataURL, maskBlob } = await generateMask(
isMaskEnabled ? objects.filter(isCanvasMaskLine) : [],
// For the mask layer, use the normal boundingBox
const maskStage = await createMaskStage(
isMaskEnabled ? objects.filter(isCanvasMaskLine) : [], // only include mask lines, and only if mask is enabled
boundingBox
);
// convert to ImageData (via pure jank)
const baseImageData = await dataURLToImageData(
baseDataURL,
boundingBox.width,
boundingBox.height
);
// convert to ImageData (via pure jank)
const maskImageData = await dataURLToImageData(
maskDataURL,
boundingBox.width,
boundingBox.height
);
// check transparency
const {
isPartiallyTransparent: baseIsPartiallyTransparent,
isFullyTransparent: baseIsFullyTransparent,
} = getImageDataTransparency(baseImageData.data);
// check mask for black
const doesMaskHaveBlackPixels = areAnyPixelsBlack(maskImageData.data);
if (state.system.enableImageDebugging) {
openBase64ImageInTab([
{ base64: maskDataURL, caption: 'mask b64' },
{ base64: baseDataURL, caption: 'image b64' },
]);
}
// generationParameters.init_img = imageDataURL;
// generationParameters.progress_images = false;
// if (boundingBoxScale !== 'none') {
// generationParameters.inpaint_width = scaledBoundingBoxDimensions.width;
// generationParameters.inpaint_height = scaledBoundingBoxDimensions.height;
// }
// generationParameters.seam_size = seamSize;
// generationParameters.seam_blur = seamBlur;
// generationParameters.seam_strength = seamStrength;
// generationParameters.seam_steps = seamSteps;
// generationParameters.tile_size = tileSize;
// generationParameters.infill_method = infillMethod;
// generationParameters.force_outpaint = false;
const maskBlob = await konvaNodeToBlob(maskStage, boundingBox);
const maskImageData = await konvaNodeToImageData(maskStage, boundingBox);
return {
baseDataURL,
baseBlob,
maskDataURL,
baseImageData,
maskBlob,
baseIsPartiallyTransparent,
baseIsFullyTransparent,
doesMaskHaveBlackPixels,
maskImageData,
};
};

View File

@ -0,0 +1,31 @@
import {
areAnyPixelsBlack,
getImageDataTransparency,
} from 'common/util/arrayBuffer';
/**
 * Determines the generation mode from the canvas layers' pixel data:
 * - fully transparent base  -> 'txt2img'
 * - partially transparent base -> 'outpaint'
 * - opaque base + black mask pixels -> 'inpaint'
 * - opaque base, no mask -> 'img2img'
 */
export const getCanvasGenerationMode = (
  baseImageData: ImageData,
  maskImageData: ImageData
) => {
  const { isPartiallyTransparent, isFullyTransparent } =
    getImageDataTransparency(baseImageData.data);

  // Any black pixel in the mask means the user has painted a mask.
  const maskHasBlackPixels = areAnyPixelsBlack(maskImageData.data);

  if (isPartiallyTransparent) {
    return isFullyTransparent ? 'txt2img' : 'outpaint';
  }

  return maskHasBlackPixels ? 'inpaint' : 'img2img';
};

View File

@ -0,0 +1,16 @@
import Konva from 'konva';
import { IRect } from 'konva/lib/types';
import { canvasToBlob } from './canvasToBlob';
/**
 * Converts a Konva node to a Blob
 * @param node - The Konva node to convert to a Blob
 * @param boundingBox - The bounding box to crop to
 * @returns A Promise that resolves with Blob of the node cropped to the bounding box
 */
export const konvaNodeToBlob = async (
  node: Konva.Node,
  boundingBox: IRect
): Promise<Blob> => {
  // Render the cropped region to a canvas, then encode it as a Blob.
  const croppedCanvas = node.toCanvas(boundingBox);
  return canvasToBlob(croppedCanvas);
};

View File

@ -0,0 +1,16 @@
import Konva from 'konva';
import { IRect } from 'konva/lib/types';
/**
 * Converts a Konva node to a dataURL
 * @param node - The Konva node to convert to a dataURL
 * @param boundingBox - The bounding box to crop to
 * @returns A dataURL of the node cropped to the bounding box
 */
export const konvaNodeToDataURL = (
  node: Konva.Node,
  boundingBox: IRect
): string => node.toDataURL(boundingBox);

View File

@ -0,0 +1,23 @@
import Konva from 'konva';
import { IRect } from 'konva/lib/types';
import { dataURLToImageData } from './dataURLToImageData';
/**
 * Converts a Konva node to an ImageData object
 * @param node - The Konva node to convert to an ImageData object
 * @param boundingBox - The bounding box to crop to
 * @returns A Promise that resolves with ImageData object of the node cropped to the bounding box
 */
export const konvaNodeToImageData = async (
  node: Konva.Node,
  boundingBox: IRect
): Promise<ImageData> => {
  const { width, height } = boundingBox;
  // Render the cropped region to a data URL, then decode it into ImageData.
  const dataURL = node.toDataURL(boundingBox);
  return dataURLToImageData(dataURL, width, height);
};