Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Moves image uploading to HTTP
- It all seems to work fine
- A lot of cleanup is still needed
- Logging needs to be added
- May need types to be reviewed
This commit is contained in:
parent
b049bbc64e
commit
4382cd0b91
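
In outline, this commit replaces the socketio-based image upload events with a multipart POST to the new /upload route. A minimal sketch of the client-side round trip, mirroring the thunks added below (the helper name and the inline response type are illustrative, not part of the diff):

// Build a multipart request; the server reads request.files["file"] and request.form["kind"].
const uploadToServer = async (file: File, kind: 'init' | 'temp' | 'result' | 'mask') => {
  const formData = new FormData();
  formData.append('file', file, file.name);
  formData.append('kind', kind);

  // No Content-Type header: the browser sets the multipart boundary itself.
  const response = await fetch(window.location.origin + '/upload', {
    method: 'POST',
    body: formData,
  });

  // The server responds with { image: { url, mtime, width, height } }.
  return (await response.json()) as {
    image: { url: string; mtime: number; width: number; height: number };
  };
};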
@@ -46,6 +46,13 @@ class InvokeAIWebServer:
        self.esrgan = esrgan

        self.canceled = Event()
        self.ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}

    def allowed_file(self, filename: str) -> bool:
        return (
            "." in filename
            and filename.rsplit(".", 1)[1].lower() in self.ALLOWED_EXTENSIONS
        )

    def run(self):
        self.setup_app()
@@ -98,41 +105,70 @@ class InvokeAIWebServer:
            return send_from_directory(self.app.static_folder, "index.html")

        @self.app.route("/upload", methods=["POST"])
        def upload_base64_file():
        def upload():
            try:
                data = request.get_json()
                dataURL = data["dataURL"]
                name = data["name"]
                # check if the post request has the file part
                if "file" not in request.files:
                    return "No file part", 400
                file = request.files["file"]

                print(f'>> Image upload requested "{name}"')
                # If the user does not select a file, the browser submits an
                # empty file without a filename.
                if file.filename == "":
                    return "No selected file", 400

                if dataURL is not None:
                    bytes = dataURL_to_bytes(dataURL)
                kind = request.form["kind"]

                    file_path = self.save_file_unique_uuid_name(
                        bytes=bytes, name=name, path=self.result_path
                if kind == "init":
                    path = self.init_image_path
                elif kind == "temp":
                    path = self.temp_image_path
                elif kind == "result":
                    path = self.result_path
                elif kind == "mask":
                    path = self.mask_image_path
                else:
                    return f"Invalid upload kind: {kind}", 400

                if not self.allowed_file(file.filename):
                    return (
                        f'Invalid file type, must be one of: {", ".join(self.ALLOWED_EXTENSIONS)}',
                        400,
                    )

                    mtime = os.path.getmtime(file_path)
                    (width, height) = Image.open(file_path).size
                secured_filename = secure_filename(file.filename)

                    response = {
                uuid = uuid4().hex
                truncated_uuid = uuid[:8]

                split = os.path.splitext(secured_filename)
                name = f"{split[0]}.{truncated_uuid}{split[1]}"

                file_path = os.path.join(path, name)

                file.save(file_path)

                mtime = os.path.getmtime(file_path)
                (width, height) = Image.open(file_path).size

                response = {
                    "image": {
                        "url": self.get_url_from_image_path(file_path),
                        "mtime": mtime,
                        "width": width,
                        "height": height,
                        "category": "result",
                        "destination": "outpainting_merge",
                    }
                    return response
                else:
                    return "No dataURL provided"
                    },
                }

                return response, 200

            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                return "Error uploading file", 500

        self.load_socketio_listeners(self.socketio)
@@ -177,6 +213,7 @@ class InvokeAIWebServer:
        self.init_image_url = "outputs/init-images/"
        self.mask_image_url = "outputs/mask-images/"
        self.intermediate_url = "outputs/intermediates/"
        self.temp_image_url = "outputs/temp-images/"
        # location for "finished" images
        self.result_path = args.outdir
        # temporary path for intermediates
@@ -184,6 +221,8 @@ class InvokeAIWebServer:
        # path for user-uploaded init images and masks
        self.init_image_path = os.path.join(self.result_path, "init-images/")
        self.mask_image_path = os.path.join(self.result_path, "mask-images/")
        # path for temp images e.g. gallery generations which are not committed
        self.temp_image_path = os.path.join(self.result_path, "temp-images/")
        # txt log
        self.log_path = os.path.join(self.result_path, "invoke_log.txt")
        # make all output paths
@@ -194,6 +233,7 @@ class InvokeAIWebServer:
                self.intermediate_path,
                self.init_image_path,
                self.mask_image_path,
                self.temp_image_path,
            ]
        ]
@@ -517,59 +557,6 @@ class InvokeAIWebServer:
                traceback.print_exc()
                print("\n")

        # TODO: I think this needs a safety mechanism.
        @socketio.on("uploadImage")
        def handle_upload_image(bytes, name, destination):
            try:
                print(f'>> Image upload requested "{name}"')
                file_path = self.save_file_unique_uuid_name(
                    bytes=bytes, name=name, path=self.init_image_path
                )
                mtime = os.path.getmtime(file_path)
                (width, height) = Image.open(file_path).size

                socketio.emit(
                    "imageUploaded",
                    {
                        "url": self.get_url_from_image_path(file_path),
                        "mtime": mtime,
                        "width": width,
                        "height": height,
                        "category": "user",
                        "destination": destination,
                    },
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")

        # TODO: I think this needs a safety mechanism.
        @socketio.on("uploadOutpaintingMergeImage")
        def handle_upload_outpainting_merge_image(dataURL, name):
            try:
                print(f'>> Outpainting merge image upload requested "{name}"')

                image = dataURL_to_image(dataURL)
                file_name = self.make_unique_init_image_filename(name)
                file_path = os.path.join(self.result_path, file_name)
                image.save(file_path)

                socketio.emit(
                    "outpaintingMergeImageUploaded",
                    {
                        "url": self.get_url_from_image_path(file_path),
                    },
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")

    # App Functions
    def get_system_config(self):
        model_list = self.generate.model_cache.list_models()
@@ -621,7 +608,7 @@ class InvokeAIWebServer:
        truncated_outpaint_mask_b64 = generation_parameters["init_mask"][:64]

        init_img_url = generation_parameters["init_img"]

        original_bounding_box = generation_parameters["bounding_box"].copy()

        if generation_parameters["generation_mode"] == "outpainting":
@@ -1247,6 +1234,10 @@ class InvokeAIWebServer:
            return os.path.abspath(
                os.path.join(self.intermediate_path, os.path.basename(url))
            )
        elif "temp-images" in url:
            return os.path.abspath(
                os.path.join(self.temp_image_path, os.path.basename(url))
            )
        else:
            return os.path.abspath(
                os.path.join(self.result_path, os.path.basename(url))
@@ -1267,6 +1258,8 @@ class InvokeAIWebServer:
                return os.path.join(self.mask_image_url, os.path.basename(path))
            elif "intermediates" in path:
                return os.path.join(self.intermediate_url, os.path.basename(path))
            elif "temp-images" in path:
                return os.path.join(self.temp_image_url, os.path.basename(path))
            else:
                return os.path.join(self.result_url, os.path.basename(path))
        except Exception as e:
frontend/src/app/invokeai.d.ts (vendored, 11 changes)

@@ -118,7 +118,7 @@ export declare type Image = {
  width: number;
  height: number;
  category: GalleryCategory;
  isBase64: boolean;
  isBase64?: boolean;
};

// GalleryImages is an array of Image.

@@ -178,8 +178,8 @@ export declare type ImageResultResponse = Omit<Image, 'uuid'> & {
  generationMode: InvokeTabName;
};

export declare type ImageUploadResponse = Omit<Image, 'uuid' | 'metadata'> & {
  destination: 'img2img' | 'inpainting' | 'outpainting' | 'outpainting_merge';
export declare type ImageUploadResponse = {
  image: Omit<Image, 'uuid' | 'metadata' | 'category'>;
};

export declare type ErrorResponse = {

@@ -203,11 +203,6 @@ export declare type ImageUrlResponse = {
  url: string;
};

export declare type ImageUploadDestination =
  | 'img2img'
  | 'inpainting'
  | 'outpainting_merge';

export declare type UploadImagePayload = {
  file: File;
  destination?: ImageUploadDestination;
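
Since ImageUploadResponse now carries only the bare image metadata, the client widens it back into a full InvokeAI.Image by filling in uuid and category itself, as the new thunks further below do. A minimal sketch of that widening step (the helper name is illustrative):

import * as InvokeAI from 'app/invokeai';
import { v4 as uuidv4 } from 'uuid';

// The response omits uuid, metadata and category, so the client supplies them.
const toGalleryImage = (
  data: InvokeAI.ImageUploadResponse,
  category: InvokeAI.GalleryCategory
): InvokeAI.Image => ({
  uuid: uuidv4(),
  category,
  ...data.image,
});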
@@ -330,41 +330,41 @@ const makeSocketIOListeners = (
        })
      );
    },
    onImageUploaded: (data: InvokeAI.ImageUploadResponse) => {
      const { destination, ...rest } = data;
      const image = {
        uuid: uuidv4(),
        ...rest,
      };
    // onImageUploaded: (data: InvokeAI.ImageUploadResponse) => {
    //   const { origin, image, kind } = data;
    //   const newImage = {
    //     uuid: uuidv4(),
    //     ...image,
    //   };

      try {
        dispatch(addImage({ image, category: 'user' }));
    //   try {
    //     dispatch(addImage({ image: newImage, category: 'user' }));

        switch (destination) {
          case 'img2img': {
            dispatch(setInitialImage(image));
            break;
          }
          case 'inpainting': {
            dispatch(setImageToInpaint(image));
            break;
          }
          default: {
            dispatch(setCurrentImage(image));
            break;
          }
        }
    //     switch (origin) {
    //       case 'img2img': {
    //         dispatch(setInitialImage(newImage));
    //         break;
    //       }
    //       case 'inpainting': {
    //         dispatch(setImageToInpaint(newImage));
    //         break;
    //       }
    //       default: {
    //         dispatch(setCurrentImage(newImage));
    //         break;
    //       }
    //     }

        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Image uploaded: ${data.url}`,
          })
        );
      } catch (e) {
        console.error(e);
      }
    },
    //     dispatch(
    //       addLogEntry({
    //         timestamp: dateFormat(new Date(), 'isoDateTime'),
    //         message: `Image uploaded: ${image.url}`,
    //       })
    //     );
    //   } catch (e) {
    //     console.error(e);
    //   }
    // },
    /**
     * Callback to run when we receive a 'maskImageUploaded' event.
     */
@@ -43,7 +43,7 @@ export const socketioMiddleware = () => {
    onGalleryImages,
    onProcessingCanceled,
    onImageDeleted,
    onImageUploaded,
    // onImageUploaded,
    onMaskImageUploaded,
    onSystemConfig,
    onModelChanged,

@@ -104,9 +104,9 @@ export const socketioMiddleware = () => {
      onImageDeleted(data);
    });

    socketio.on('imageUploaded', (data: InvokeAI.ImageUploadResponse) => {
      onImageUploaded(data);
    });
    // socketio.on('imageUploaded', (data: InvokeAI.ImageUploadResponse) => {
    //   onImageUploaded(data);
    // });

    socketio.on('maskImageUploaded', (data: InvokeAI.ImageUrlResponse) => {
      onMaskImageUploaded(data);
@@ -8,12 +8,13 @@ import {
import { useAppDispatch, useAppSelector } from 'app/store';
import { FileRejection, useDropzone } from 'react-dropzone';
import { useToast } from '@chakra-ui/react';
import { uploadImage } from 'app/socketio/actions';
import { ImageUploadDestination, UploadImagePayload } from 'app/invokeai';
// import { uploadImage } from 'app/socketio/actions';
import { UploadImagePayload } from 'app/invokeai';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { activeTabNameSelector } from 'features/options/optionsSelectors';
import { tabDict } from 'features/tabs/InvokeTabs';
import ImageUploadOverlay from './ImageUploadOverlay';
import { uploadImage } from 'features/gallery/util/uploadImage';

type ImageUploaderProps = {
  children: ReactNode;

@@ -44,15 +45,12 @@ const ImageUploader = (props: ImageUploaderProps) => {
  );

  const fileAcceptedCallback = useCallback(
    (file: File) => {
      setIsHandlingUpload(true);
      const payload: UploadImagePayload = { file };
      if (['img2img', 'inpainting', 'outpainting'].includes(activeTabName)) {
        payload.destination = activeTabName as ImageUploadDestination;
      }
      dispatch(uploadImage(payload));
    async (file: File) => {
      // setIsHandlingUpload(true);

      dispatch(uploadImage({ imageFile: file }));
    },
    [dispatch, activeTabName]
    [dispatch]
  );

  const onDrop = useCallback(

@@ -124,12 +122,12 @@ const ImageUploader = (props: ImageUploaderProps) => {
        return;
      }

      const payload: UploadImagePayload = { file };
      if (['img2img', 'inpainting'].includes(activeTabName)) {
        payload.destination = activeTabName as ImageUploadDestination;
      }
      // const payload: UploadImagePayload = { file };
      // if (['img2img', 'inpainting'].includes(activeTabName)) {
      //   payload.destination = activeTabName as ImageUploadDestination;
      // }

      dispatch(uploadImage(payload));
      // dispatch(uploadImage(payload));
    };
    document.addEventListener('paste', pasteImageListener);
    return () => {
@@ -197,11 +197,11 @@ const IAICanvas = () => {
            listening={false}
          />
        )}
        {isStaging && <IAICanvasStagingArea />}
        <IAICanvasStagingArea visible={isStaging} />
        {shouldShowIntermediates && <IAICanvasIntermediateImage />}
        {!isStaging && (
          <IAICanvasBoundingBox visible={shouldShowBoundingBox} />
        )}
        <IAICanvasBoundingBox
          visible={shouldShowBoundingBox && !isStaging}
        />
      </Layer>
    </Stage>
    {isOnOutpaintingTab && <IAICanvasStatusText />}
@@ -2,13 +2,7 @@ import { GroupConfig } from 'konva/lib/Group';
import { Group, Line } from 'react-konva';
import { useAppSelector } from 'app/store';
import { createSelector } from '@reduxjs/toolkit';
import {
  currentCanvasSelector,
  GenericCanvasState,
  InpaintingCanvasState,
  isCanvasMaskLine,
  OutpaintingCanvasState,
} from './canvasSlice';
import { currentCanvasSelector, isCanvasMaskLine } from './canvasSlice';
import _ from 'lodash';

export const canvasLinesSelector = createSelector(
@@ -5,7 +5,6 @@ import {
  isStagingSelector,
  resetCanvas,
  setTool,
  uploadOutpaintingMergedImage,
} from './canvasSlice';
import { useAppDispatch, useAppSelector } from 'app/store';
import _ from 'lodash';

@@ -26,6 +25,7 @@ import IAICanvasSettingsButtonPopover from './IAICanvasSettingsButtonPopover';
import IAICanvasEraserButtonPopover from './IAICanvasEraserButtonPopover';
import IAICanvasBrushButtonPopover from './IAICanvasBrushButtonPopover';
import IAICanvasMaskButtonPopover from './IAICanvasMaskButtonPopover';
import { mergeAndUploadCanvas } from './util/mergeAndUploadCanvas';

export const canvasControlsSelector = createSelector(
  [currentCanvasSelector, isStagingSelector],

@@ -68,13 +68,23 @@ const IAICanvasOutpaintingControls = () => {
        tooltip="Merge Visible"
        icon={<FaLayerGroup />}
        onClick={() => {
          dispatch(uploadOutpaintingMergedImage(canvasImageLayerRef));
          dispatch(
            mergeAndUploadCanvas({
              canvasImageLayerRef,
              saveToGallery: false,
            })
          );
        }}
      />
      <IAIIconButton
        aria-label="Save Selection to Gallery"
        tooltip="Save Selection to Gallery"
        aria-label="Save to Gallery"
        tooltip="Save to Gallery"
        icon={<FaSave />}
        onClick={() => {
          dispatch(
            mergeAndUploadCanvas({ canvasImageLayerRef, saveToGallery: true })
          );
        }}
      />
      <IAIIconButton
        aria-label="Copy Selection"
frontend/src/features/canvas/canvasReducers.ts (new file, 118 lines)

@@ -0,0 +1,118 @@
import * as InvokeAI from 'app/invokeai';
import { PayloadAction } from '@reduxjs/toolkit';
import { CanvasState, Dimensions, initialLayerState } from './canvasSlice';
import { Vector2d } from 'konva/lib/types';
import { roundDownToMultiple } from 'common/util/roundDownToMultiple';

export const setImageToInpaint_reducer = (
  state: CanvasState,
  image: InvokeAI.Image
  // action: PayloadAction<InvokeAI.Image>
) => {
  const { width: canvasWidth, height: canvasHeight } =
    state.inpainting.stageDimensions;
  const { width, height } = state.inpainting.boundingBoxDimensions;
  const { x, y } = state.inpainting.boundingBoxCoordinates;

  const maxWidth = Math.min(image.width, canvasWidth);
  const maxHeight = Math.min(image.height, canvasHeight);

  const newCoordinates: Vector2d = { x, y };
  const newDimensions: Dimensions = { width, height };

  if (width + x > maxWidth) {
    // Bounding box at least needs to be translated
    if (width > maxWidth) {
      // Bounding box also needs to be resized
      newDimensions.width = roundDownToMultiple(maxWidth, 64);
    }
    newCoordinates.x = maxWidth - newDimensions.width;
  }

  if (height + y > maxHeight) {
    // Bounding box at least needs to be translated
    if (height > maxHeight) {
      // Bounding box also needs to be resized
      newDimensions.height = roundDownToMultiple(maxHeight, 64);
    }
    newCoordinates.y = maxHeight - newDimensions.height;
  }

  state.inpainting.boundingBoxDimensions = newDimensions;
  state.inpainting.boundingBoxCoordinates = newCoordinates;

  state.inpainting.pastLayerStates.push(state.inpainting.layerState);

  state.inpainting.layerState = {
    ...initialLayerState,
    objects: [
      {
        kind: 'image',
        layer: 'base',
        x: 0,
        y: 0,
        width: image.width,
        height: image.height,
        image: image,
      },
    ],
  };

  state.outpainting.futureLayerStates = [];
  state.doesCanvasNeedScaling = true;
};

export const setImageToOutpaint_reducer = (
  state: CanvasState,
  image: InvokeAI.Image
) => {
  const { width: canvasWidth, height: canvasHeight } =
    state.outpainting.stageDimensions;
  const { width, height } = state.outpainting.boundingBoxDimensions;
  const { x, y } = state.outpainting.boundingBoxCoordinates;

  const maxWidth = Math.min(image.width, canvasWidth);
  const maxHeight = Math.min(image.height, canvasHeight);

  const newCoordinates: Vector2d = { x, y };
  const newDimensions: Dimensions = { width, height };

  if (width + x > maxWidth) {
    // Bounding box at least needs to be translated
    if (width > maxWidth) {
      // Bounding box also needs to be resized
      newDimensions.width = roundDownToMultiple(maxWidth, 64);
    }
    newCoordinates.x = maxWidth - newDimensions.width;
  }

  if (height + y > maxHeight) {
    // Bounding box at least needs to be translated
    if (height > maxHeight) {
      // Bounding box also needs to be resized
      newDimensions.height = roundDownToMultiple(maxHeight, 64);
    }
    newCoordinates.y = maxHeight - newDimensions.height;
  }

  state.outpainting.boundingBoxDimensions = newDimensions;
  state.outpainting.boundingBoxCoordinates = newCoordinates;

  state.outpainting.pastLayerStates.push(state.outpainting.layerState);
  state.outpainting.layerState = {
    ...initialLayerState,
    objects: [
      {
        kind: 'image',
        layer: 'base',
        x: 0,
        y: 0,
        width: image.width,
        height: image.height,
        image: image,
      },
    ],
  };
  state.outpainting.futureLayerStates = [];
  state.doesCanvasNeedScaling = true;
};
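
The two reducers above fit the bounding box inside both the uploaded image and the stage, shrinking it to a multiple of 64 before translating it back into view. A standalone sketch of that clamping logic with a worked example (the helper functions below are illustrative, not part of the commit):

const roundDownToMultiple = (value: number, multiple: number): number =>
  Math.floor(value / multiple) * multiple;

const fitBoundingBoxToImage = (
  box: { x: number; y: number; width: number; height: number },
  image: { width: number; height: number },
  stage: { width: number; height: number }
) => {
  const maxWidth = Math.min(image.width, stage.width);
  const maxHeight = Math.min(image.height, stage.height);

  let { x, y, width, height } = box;

  if (width + x > maxWidth) {
    // Resize to a 64px multiple only if the box itself is too wide...
    if (width > maxWidth) width = roundDownToMultiple(maxWidth, 64);
    // ...then translate so it stays inside.
    x = maxWidth - width;
  }
  if (height + y > maxHeight) {
    if (height > maxHeight) height = roundDownToMultiple(maxHeight, 64);
    y = maxHeight - height;
  }

  return { x, y, width, height };
};

// Example: a 512x512 box at (64, 0) over a 1000x300 image on a 512x512 stage
// becomes a 512x256 box at (0, 44).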
@@ -13,6 +13,14 @@ import { roundDownToMultiple } from 'common/util/roundDownToMultiple';
import { RootState } from 'app/store';
import { MutableRefObject } from 'react';
import Konva from 'konva';
import { tabMap } from 'features/tabs/InvokeTabs';
import { activeTabNameSelector } from 'features/options/optionsSelectors';
import { mergeAndUploadCanvas } from './util/mergeAndUploadCanvas';
import { uploadImage } from 'features/gallery/util/uploadImage';
import {
  setImageToInpaint_reducer,
  setImageToOutpaint_reducer,
} from './canvasReducers';

export interface GenericCanvasState {
  tool: CanvasTool;

@@ -75,6 +83,8 @@ export type CanvasImage = {
  layer: 'base';
  x: number;
  y: number;
  width: number;
  height: number;
  image: InvokeAI.Image;
};

@@ -137,7 +147,7 @@ export interface CanvasState {
  outpainting: OutpaintingCanvasState;
}

const initialLayerState: CanvasLayerState = {
export const initialLayerState: CanvasLayerState = {
  objects: [],
  stagingArea: {
    x: -1,

@@ -283,104 +293,10 @@ export const canvasSlice = createSlice({
      // state.inpainting.imageToInpaint = undefined;
    },
    setImageToOutpaint: (state, action: PayloadAction<InvokeAI.Image>) => {
      const { width: canvasWidth, height: canvasHeight } =
        state.outpainting.stageDimensions;
      const { width, height } = state.outpainting.boundingBoxDimensions;
      const { x, y } = state.outpainting.boundingBoxCoordinates;

      const maxWidth = Math.min(action.payload.width, canvasWidth);
      const maxHeight = Math.min(action.payload.height, canvasHeight);

      const newCoordinates: Vector2d = { x, y };
      const newDimensions: Dimensions = { width, height };

      if (width + x > maxWidth) {
        // Bounding box at least needs to be translated
        if (width > maxWidth) {
          // Bounding box also needs to be resized
          newDimensions.width = roundDownToMultiple(maxWidth, 64);
        }
        newCoordinates.x = maxWidth - newDimensions.width;
      }

      if (height + y > maxHeight) {
        // Bounding box at least needs to be translated
        if (height > maxHeight) {
          // Bounding box also needs to be resized
          newDimensions.height = roundDownToMultiple(maxHeight, 64);
        }
        newCoordinates.y = maxHeight - newDimensions.height;
      }

      state.outpainting.boundingBoxDimensions = newDimensions;
      state.outpainting.boundingBoxCoordinates = newCoordinates;

      state.outpainting.pastLayerStates.push(state.outpainting.layerState);
      state.outpainting.layerState = {
        ...initialLayerState,
        objects: [
          {
            kind: 'image',
            layer: 'base',
            x: 0,
            y: 0,
            image: action.payload,
          },
        ],
      };
      state.outpainting.futureLayerStates = [];
      state.doesCanvasNeedScaling = true;
      setImageToOutpaint_reducer(state, action.payload);
    },
    setImageToInpaint: (state, action: PayloadAction<InvokeAI.Image>) => {
      const { width: canvasWidth, height: canvasHeight } =
        state.inpainting.stageDimensions;
      const { width, height } = state.inpainting.boundingBoxDimensions;
      const { x, y } = state.inpainting.boundingBoxCoordinates;

      const maxWidth = Math.min(action.payload.width, canvasWidth);
      const maxHeight = Math.min(action.payload.height, canvasHeight);

      const newCoordinates: Vector2d = { x, y };
      const newDimensions: Dimensions = { width, height };

      if (width + x > maxWidth) {
        // Bounding box at least needs to be translated
        if (width > maxWidth) {
          // Bounding box also needs to be resized
          newDimensions.width = roundDownToMultiple(maxWidth, 64);
        }
        newCoordinates.x = maxWidth - newDimensions.width;
      }

      if (height + y > maxHeight) {
        // Bounding box at least needs to be translated
        if (height > maxHeight) {
          // Bounding box also needs to be resized
          newDimensions.height = roundDownToMultiple(maxHeight, 64);
        }
        newCoordinates.y = maxHeight - newDimensions.height;
      }

      state.inpainting.boundingBoxDimensions = newDimensions;
      state.inpainting.boundingBoxCoordinates = newCoordinates;

      state.inpainting.pastLayerStates.push(state.inpainting.layerState);

      state.inpainting.layerState = {
        ...initialLayerState,
        objects: [
          {
            kind: 'image',
            layer: 'base',
            x: 0,
            y: 0,
            image: action.payload,
          },
        ],
      };

      state.outpainting.futureLayerStates = [];
      state.doesCanvasNeedScaling = true;
      setImageToInpaint_reducer(state, action.payload);
    },
    setStageDimensions: (state, action: PayloadAction<Dimensions>) => {
      state[state.currentCanvas].stageDimensions = action.payload;

@@ -568,8 +484,7 @@ export const canvasSlice = createSlice({
      currentCanvas.layerState.stagingArea.images.push({
        kind: 'image',
        layer: 'base',
        x: boundingBox.x,
        y: boundingBox.y,
        ...boundingBox,
        image,
      });

@@ -705,14 +620,8 @@ export const canvasSlice = createSlice({
        currentCanvas.pastLayerStates.shift();
      }

      const { x, y, image } = images[selectedImageIndex];

      currentCanvas.layerState.objects.push({
        kind: 'image',
        layer: 'base',
        x,
        y,
        image,
        ...images[selectedImageIndex],
      });

      currentCanvas.layerState.stagingArea = {

@@ -723,20 +632,39 @@ export const canvasSlice = createSlice({
    },
  },
  extraReducers: (builder) => {
    builder.addCase(uploadOutpaintingMergedImage.fulfilled, (state, action) => {
    builder.addCase(mergeAndUploadCanvas.fulfilled, (state, action) => {
      if (!action.payload) return;
      state.outpainting.pastLayerStates.push({
        ...state.outpainting.layerState,
      });
      state.outpainting.futureLayerStates = [];
      const { image, kind, boundingBox } = action.payload;

      state.outpainting.layerState.objects = [
        {
          kind: 'image',
          layer: 'base',
          ...action.payload,
        },
      ];
      if (kind === 'temp_merged_canvas') {
        state.outpainting.pastLayerStates.push({
          ...state.outpainting.layerState,
        });

        state.outpainting.futureLayerStates = [];

        state.outpainting.layerState.objects = [
          {
            kind: 'image',
            layer: 'base',
            ...boundingBox,
            image,
          },
        ];
      }
    });

    builder.addCase(uploadImage.fulfilled, (state, action) => {
      if (!action.payload) return;
      const { image, kind, activeTabName } = action.payload;

      if (kind !== 'init') return;

      if (activeTabName === 'inpainting') {
        setImageToInpaint_reducer(state, image);
      } else if (activeTabName === 'outpainting') {
        setImageToOutpaint_reducer(state, image);
      }
    });
  },
});
@@ -799,66 +727,6 @@ export const {

export default canvasSlice.reducer;

export const uploadOutpaintingMergedImage = createAsyncThunk(
  'canvas/uploadOutpaintingMergedImage',
  async (
    canvasImageLayerRef: MutableRefObject<Konva.Layer | null>,
    thunkAPI
  ) => {
    const { getState } = thunkAPI;

    const state = getState() as RootState;
    const stageScale = state.canvas.outpainting.stageScale;

    if (!canvasImageLayerRef.current) return;
    const tempScale = canvasImageLayerRef.current.scale();

    const { x: relativeX, y: relativeY } =
      canvasImageLayerRef.current.getClientRect({
        relativeTo: canvasImageLayerRef.current.getParent(),
      });

    canvasImageLayerRef.current.scale({
      x: 1 / stageScale,
      y: 1 / stageScale,
    });

    const clientRect = canvasImageLayerRef.current.getClientRect();

    const imageDataURL = canvasImageLayerRef.current.toDataURL(clientRect);

    canvasImageLayerRef.current.scale(tempScale);

    if (!imageDataURL) return;

    const response = await fetch(window.location.origin + '/upload', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        dataURL: imageDataURL,
        name: 'outpaintingmerge.png',
      }),
    });

    const data = (await response.json()) as InvokeAI.ImageUploadResponse;

    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const { destination, ...rest } = data;
    const image = {
      uuid: uuidv4(),
      ...rest,
    };

    return {
      image,
      x: relativeX,
      y: relativeY,
    };
  }
);

export const currentCanvasSelector = (state: RootState): BaseCanvasState =>
  state.canvas[state.canvas.currentCanvas];
frontend/src/features/canvas/util/layerToBlob.ts (new file, 26 lines)

@@ -0,0 +1,26 @@
import Konva from 'konva';

const layerToBlob = async (layer: Konva.Layer, stageScale: number) => {
  const tempScale = layer.scale();

  const { x: relativeX, y: relativeY } = layer.getClientRect({
    relativeTo: layer.getParent(),
  });

  // Scale the canvas before getting it as a Blob
  layer.scale({
    x: 1 / stageScale,
    y: 1 / stageScale,
  });

  const clientRect = layer.getClientRect();

  const blob = await layer.toBlob(clientRect);

  // Unscale the canvas
  layer.scale(tempScale);

  return { blob, relativeX, relativeY };
};

export default layerToBlob;
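
A hypothetical usage of the new layerToBlob helper, along the lines of how mergeAndUploadCanvas below consumes it (the wrapper function is illustrative):

import Konva from 'konva';
import { MutableRefObject } from 'react';
import layerToBlob from 'features/canvas/util/layerToBlob';

const exportLayer = async (
  layerRef: MutableRefObject<Konva.Layer | null>,
  stageScale: number
) => {
  if (!layerRef.current) return;

  // blob is the layer rendered at 1:1 scale; relativeX/relativeY locate it on the stage.
  const { blob, relativeX, relativeY } = await layerToBlob(layerRef.current, stageScale);
  return { blob, relativeX, relativeY };
};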
frontend/src/features/canvas/util/mergeAndUploadCanvas.ts (new file, 64 lines)

@@ -0,0 +1,64 @@
import { createAsyncThunk } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import Konva from 'konva';
import { MutableRefObject } from 'react';
import * as InvokeAI from 'app/invokeai';
import { v4 as uuidv4 } from 'uuid';
import layerToBlob from './layerToBlob';

export const mergeAndUploadCanvas = createAsyncThunk(
  'canvas/mergeAndUploadCanvas',
  async (
    args: {
      canvasImageLayerRef: MutableRefObject<Konva.Layer | null>;
      saveToGallery: boolean;
    },
    thunkAPI
  ) => {
    const { canvasImageLayerRef, saveToGallery } = args;

    const { getState } = thunkAPI;

    const state = getState() as RootState;

    const stageScale = state.canvas[state.canvas.currentCanvas].stageScale;

    if (!canvasImageLayerRef.current) return;

    const { blob, relativeX, relativeY } = await layerToBlob(
      canvasImageLayerRef.current,
      stageScale
    );

    if (!blob) return;

    const formData = new FormData();

    formData.append('file', blob as Blob, 'merged_canvas.png');
    formData.append('kind', saveToGallery ? 'result' : 'temp');

    const response = await fetch(window.location.origin + '/upload', {
      method: 'POST',
      body: formData,
    });

    const { image } = (await response.json()) as InvokeAI.ImageUploadResponse;

    const newImage: InvokeAI.Image = {
      uuid: uuidv4(),
      category: saveToGallery ? 'result' : 'user',
      ...image,
    };

    return {
      image: newImage,
      kind: saveToGallery ? 'merged_canvas' : 'temp_merged_canvas',
      boundingBox: {
        x: relativeX,
        y: relativeY,
        width: image.width,
        height: image.height,
      },
    };
  }
);
@@ -4,6 +4,10 @@ import _, { clamp } from 'lodash';
import * as InvokeAI from 'app/invokeai';
import { IRect } from 'konva/lib/types';
import { InvokeTabName } from 'features/tabs/InvokeTabs';
import { mergeAndUploadCanvas } from 'features/canvas/util/mergeAndUploadCanvas';
import { uploadImage } from './util/uploadImage';
import { setInitialImage } from 'features/options/optionsSlice';
import { setImageToInpaint } from 'features/canvas/canvasSlice';

export type GalleryCategory = 'user' | 'result';

@@ -25,7 +29,10 @@ export type Gallery = {
export interface GalleryState {
  currentImage?: InvokeAI.Image;
  currentImageUuid: string;
  intermediateImage?: InvokeAI.Image & { boundingBox?: IRect; generationMode?: InvokeTabName };
  intermediateImage?: InvokeAI.Image & {
    boundingBox?: IRect;
    generationMode?: InvokeTabName;
  };
  shouldPinGallery: boolean;
  shouldShowGallery: boolean;
  galleryScrollPosition: number;

@@ -261,6 +268,46 @@ export const gallerySlice = createSlice({
      state.galleryWidth = action.payload;
    },
  },
  extraReducers: (builder) => {
    builder.addCase(mergeAndUploadCanvas.fulfilled, (state, action) => {
      if (!action.payload) return;
      const { image, kind, boundingBox } = action.payload;

      if (kind === 'merged_canvas') {
        const { uuid, url, mtime } = image;

        state.categories.result.images.unshift(image);

        if (state.shouldAutoSwitchToNewImages) {
          state.currentImageUuid = uuid;
          state.currentImage = image;
          state.currentCategory = 'result';
        }

        state.intermediateImage = undefined;
        state.categories.result.latest_mtime = mtime;
      }
    });
    builder.addCase(uploadImage.fulfilled, (state, action) => {
      if (!action.payload) return;
      const { image, kind } = action.payload;

      if (kind === 'init') {
        const { uuid, mtime } = image;

        state.categories.result.images.unshift(image);

        if (state.shouldAutoSwitchToNewImages) {
          state.currentImageUuid = uuid;
          state.currentImage = image;
          state.currentCategory = 'user';
        }

        state.intermediateImage = undefined;
        state.categories.result.latest_mtime = mtime;
      }
    });
  },
});

export const {
frontend/src/features/gallery/util/uploadImage.ts (new file, 47 lines)

@@ -0,0 +1,47 @@
import { createAsyncThunk } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import * as InvokeAI from 'app/invokeai';
import { v4 as uuidv4 } from 'uuid';
import { activeTabNameSelector } from 'features/options/optionsSelectors';

export const uploadImage = createAsyncThunk(
  'gallery/uploadImage',
  async (
    args: {
      imageFile: File;
    },
    thunkAPI
  ) => {
    const { imageFile } = args;

    const { getState } = thunkAPI;

    const state = getState() as RootState;

    const activeTabName = activeTabNameSelector(state);

    const formData = new FormData();

    formData.append('file', imageFile, imageFile.name);
    formData.append('kind', 'init');

    const response = await fetch(window.location.origin + '/upload', {
      method: 'POST',
      body: formData,
    });

    const { image } = (await response.json()) as InvokeAI.ImageUploadResponse;

    const newImage: InvokeAI.Image = {
      uuid: uuidv4(),
      category: 'user',
      ...image,
    };

    return {
      image: newImage,
      kind: 'init',
      activeTabName,
    };
  }
);
@@ -5,6 +5,7 @@ import promptToString from 'common/util/promptToString';
import { seedWeightsToString } from 'common/util/seedWeightPairs';
import { FACETOOL_TYPES } from 'app/constants';
import { InvokeTabName, tabMap } from 'features/tabs/InvokeTabs';
import { uploadImage } from 'features/gallery/util/uploadImage';

export type UpscalingLevel = 2 | 4;

@@ -361,6 +362,16 @@ export const optionsSlice = createSlice({
      state.isLightBoxOpen = action.payload;
    },
  },
  extraReducers: (builder) => {
    builder.addCase(uploadImage.fulfilled, (state, action) => {
      if (!action.payload) return;
      const { image, kind, activeTabName } = action.payload;

      if (kind === 'init' && activeTabName === 'img2img') {
        state.initialImage = image;
      }
    });
  },
});

export const {
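
The pattern running through these slice changes: a single createAsyncThunk performs the HTTP upload, and each interested slice (canvas, gallery, options) reacts to its fulfilled action via extraReducers instead of a socketio listener. A minimal self-contained sketch of that pattern (names here are illustrative, not the commit's):

import { createAsyncThunk, createSlice } from '@reduxjs/toolkit';

// One thunk owns the side effect...
const demoUpload = createAsyncThunk('demo/upload', async (file: File) => {
  const formData = new FormData();
  formData.append('file', file, file.name);
  const response = await fetch('/upload', { method: 'POST', body: formData });
  return (await response.json()) as { image: { url: string } };
});

// ...and any number of slices can react to its result independently.
const demoGallerySlice = createSlice({
  name: 'demoGallery',
  initialState: { urls: [] as string[] },
  reducers: {},
  extraReducers: (builder) => {
    builder.addCase(demoUpload.fulfilled, (state, action) => {
      if (!action.payload) return;
      state.urls.unshift(action.payload.image.url);
    });
  },
});

export default demoGallerySlice.reducer;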