tidy(ui): remove unused stuff

psychedelicious 2024-08-23 11:27:03 +10:00
parent 6a21f5fde1
commit f10248e3f5
32 changed files with 24 additions and 1984 deletions

@@ -1,38 +0,0 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasCopiedToClipboard } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasCopiedToClipboard,
effect: async (action, { getState }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
const state = getState();
try {
const blob = getBaseLayerBlob(state);
copyBlobToClipboard(blob);
} catch (err) {
moduleLog.error(String(err));
toast({
id: 'CANVAS_COPY_FAILED',
title: t('toast.problemCopyingCanvas'),
description: t('toast.problemCopyingCanvasDesc'),
status: 'error',
});
return;
}
toast({
id: 'CANVAS_COPY_SUCCEEDED',
title: t('toast.canvasCopiedClipboard'),
status: 'success',
});
},
});
};
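
A note on wiring: every listener module deleted in this commit follows the same registration pattern, taking the app's typed startAppListening. Below is a minimal sketch of how such a listener is hooked into RTK's listener middleware, assuming createListenerMiddleware from Redux Toolkit; the registration module and the relative import path are hypothetical, shown for illustration only.

import { createListenerMiddleware } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
// Hypothetical path, for illustration:
import { addCanvasCopiedToClipboardListener } from './canvasCopiedToClipboard';

export const listenerMiddleware = createListenerMiddleware();
// Cast the untyped startListening to the app's typed variant.
const startAppListening = listenerMiddleware.startListening as AppStartListening;

// Each add*Listener registers its effect against the shared middleware.
addCanvasCopiedToClipboardListener(startAppListening);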

@@ -1,34 +0,0 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasDownloadedAsImage } from 'features/canvas/store/actions';
import { downloadBlob } from 'features/canvas/util/downloadBlob';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasDownloadedAsImage,
effect: async (action, { getState }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasDownloadedAsImageListener' });
const state = getState();
let blob;
try {
blob = await getBaseLayerBlob(state);
} catch (err) {
moduleLog.error(String(err));
toast({
id: 'CANVAS_DOWNLOAD_FAILED',
title: t('toast.problemDownloadingCanvas'),
description: t('toast.problemDownloadingCanvasDesc'),
status: 'error',
});
return;
}
downloadBlob(blob, 'canvas.png');
toast({ id: 'CANVAS_DOWNLOAD_SUCCEEDED', title: t('toast.canvasDownloaded'), status: 'success' });
},
});
};

@@ -1,54 +0,0 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasImageToControlAdapter } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { caImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
const log = logger('canvas');
export const addCanvasImageToControlNetListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasImageToControlAdapter,
effect: async (action, { dispatch, getState }) => {
const state = getState();
const { id } = action.payload;
let blob: Blob;
try {
blob = await getBaseLayerBlob(state, true);
} catch (err) {
log.error(String(err));
toast({
id: 'PROBLEM_SAVING_CANVAS',
title: t('toast.problemSavingCanvas'),
description: t('toast.problemSavingCanvasDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'savedCanvas.png', {
type: 'image/png',
}),
image_category: 'control',
is_intermediate: true,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: false,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasSentControlnetAssets'),
},
})
).unwrap();
dispatch(caImageChanged({ id, imageDTO }));
},
});
};

@@ -1,60 +0,0 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMaskSavedToGallery } from 'features/canvas/store/actions';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMaskSavedToGallery,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const canvasBlobsAndImageData = await getCanvasData(
state.canvas.layerState,
state.canvas.boundingBoxCoordinates,
state.canvas.boundingBoxDimensions,
state.canvas.isMaskEnabled,
state.canvas.shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
return;
}
const { maskBlob } = canvasBlobsAndImageData;
if (!maskBlob) {
log.error('Problem getting mask layer blob');
toast({
id: 'PROBLEM_SAVING_MASK',
title: t('toast.problemSavingMask'),
description: t('toast.problemSavingMaskDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.maskSavedAssets'),
},
})
);
},
});
};

@@ -1,63 +0,0 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMaskToControlAdapter } from 'features/canvas/store/actions';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { caImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMaskToControlNetListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMaskToControlAdapter,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const { id } = action.payload;
const canvasBlobsAndImageData = await getCanvasData(
state.canvas.layerState,
state.canvas.boundingBoxCoordinates,
state.canvas.boundingBoxDimensions,
state.canvas.isMaskEnabled,
state.canvas.shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
return;
}
const { maskBlob } = canvasBlobsAndImageData;
if (!maskBlob) {
log.error('Problem getting mask layer blob');
toast({
id: 'PROBLEM_IMPORTING_MASK',
title: t('toast.problemImportingMask'),
description: t('toast.problemImportingMaskDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: true,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: false,
postUploadAction: {
type: 'TOAST',
title: t('toast.maskSentControlnetAssets'),
},
})
).unwrap();
dispatch(caImageChanged({ id, imageDTO }));
},
});
};

@@ -1,73 +0,0 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMerged } from 'features/canvas/store/actions';
import { $canvasBaseLayer } from 'features/canvas/store/canvasNanostore';
import { setMergedCanvas } from 'features/canvas/store/canvasSlice';
import { getFullBaseLayerBlob } from 'features/canvas/util/getFullBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMergedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMerged,
effect: async (action, { dispatch }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasMergedListener' });
const blob = await getFullBaseLayerBlob();
if (!blob) {
moduleLog.error('Problem getting base layer blob');
toast({
id: 'PROBLEM_MERGING_CANVAS',
title: t('toast.problemMergingCanvas'),
description: t('toast.problemMergingCanvasDesc'),
status: 'error',
});
return;
}
const canvasBaseLayer = $canvasBaseLayer.get();
if (!canvasBaseLayer) {
moduleLog.error('Problem getting canvas base layer');
toast({
id: 'PROBLEM_MERGING_CANVAS',
title: t('toast.problemMergingCanvas'),
description: t('toast.problemMergingCanvasDesc'),
status: 'error',
});
return;
}
const baseLayerRect = canvasBaseLayer.getClientRect({
relativeTo: canvasBaseLayer.getParent() ?? undefined,
});
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'mergedCanvas.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasMerged'),
},
})
).unwrap();
// TODO: I can't figure out how to do the type narrowing in the `take()`, so I'm just brute-forcing it here
const { image_name } = imageDTO;
dispatch(
setMergedCanvas({
kind: 'image',
layer: 'base',
imageName: image_name,
...baseLayerRect,
})
);
},
});
};

@@ -1,53 +0,0 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { canvasSavedToGallery } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasSavedToGallery,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
let blob;
try {
blob = await getBaseLayerBlob(state);
} catch (err) {
log.error(String(err));
toast({
id: 'CANVAS_SAVE_FAILED',
title: t('toast.problemSavingCanvas'),
description: t('toast.problemSavingCanvasDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'savedCanvas.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasSavedGallery'),
},
metadata: {
_canvas_objects: parseify(state.canvas.layerState.objects),
},
})
);
},
});
};

@@ -1,185 +0,0 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch } from 'app/store/store';
import type { SerializableObject } from 'common/types';
import { parseify } from 'common/util/serialize';
import {
caImageChanged,
caModelChanged,
caProcessedImageChanged,
caProcessorConfigChanged,
caProcessorPendingBatchIdChanged,
caRecalled,
} from 'features/controlLayers/store/canvasV2Slice';
import { selectCA } from 'features/controlLayers/store/controlAdaptersReducers';
import { IMAGE_FILTERS } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { isEqual } from 'lodash-es';
import { getImageDTO } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
import { assert } from 'tsafe';
const matcher = isAnyOf(caImageChanged, caProcessedImageChanged, caProcessorConfigChanged, caModelChanged, caRecalled);
const DEBOUNCE_MS = 300;
const log = logger('queue');
/**
* Simple helper to cancel a batch and reset the pending batch ID
*/
const cancelProcessorBatch = async (dispatch: AppDispatch, id: string, batchId: string) => {
const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
log.trace({ batchId }, 'Cancelling existing preprocessor batch');
try {
await req.unwrap();
} catch {
// no-op
} finally {
req.reset();
// Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
dispatch(caProcessorPendingBatchIdChanged({ id, batchId: null }));
}
};
export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
startAppListening({
matcher,
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take, signal }) => {
const id = caRecalled.match(action) ? action.payload.data.id : action.payload.id;
const state = getState();
const originalState = getOriginalState();
// Cancel any in-progress instances of this listener
cancelActiveListeners();
log.trace('Control Layer CA auto-process triggered');
// Delay before starting actual work
await delay(DEBOUNCE_MS);
const ca = selectCA(state.canvasV2, id);
if (!ca) {
return;
}
// We should only process if the processor settings or image have changed
const originalCA = selectCA(originalState.canvasV2, id);
const originalImage = originalCA?.imageObject;
const originalConfig = originalCA?.processorConfig;
const image = ca.imageObject;
const processedImage = ca.processedImageObject;
const config = ca.processorConfig;
if (isEqual(config, originalConfig) && isEqual(image, originalImage) && processedImage) {
// Neither config nor image have changed, we can bail
return;
}
if (!image || !config) {
// - If we have no image, we have nothing to process
// - If we have no processor config, we have nothing to process
// Clear the processed image and bail
dispatch(caProcessedImageChanged({ id, imageDTO: null }));
return;
}
// At this point, the user has stopped fiddling with the processor settings and there is a processor selected.
// If there is a pending processor batch, cancel it.
if (ca.processorPendingBatchId) {
cancelProcessorBatch(dispatch, id, ca.processorPendingBatchId);
}
// TODO(psyche): I can't get TS to be happy, it thinks `config` is `never` but it should be inferred from the generic... I'll just cast it for now
const processorNode = IMAGE_FILTERS[config.type].buildNode(image.image, config as never);
const enqueueBatchArg: BatchConfig = {
prepend: true,
batch: {
graph: {
nodes: {
[processorNode.id]: {
...processorNode,
// Control images are always intermediate - do not save to gallery
is_intermediate: true,
},
},
edges: [],
},
runs: 1,
},
};
// Kick off the processor batch
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
);
try {
const enqueueResult = await req.unwrap();
// TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
dispatch(caProcessorPendingBatchIdChanged({ id, batchId: enqueueResult.batch.batch_id }));
log.debug({ enqueueResult } as SerializableObject, t('queue.graphQueued'));
// Wait for the processor node to complete
const [invocationCompleteAction] = await take(
(action): action is ReturnType<typeof socketInvocationComplete> =>
socketInvocationComplete.match(action) &&
action.payload.data.batch_id === enqueueResult.batch.batch_id &&
action.payload.data.invocation_source_id === processorNode.id
);
// We still have to check the output type
assert(
invocationCompleteAction.payload.data.result.type === 'image_output',
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
);
const { image_name } = invocationCompleteAction.payload.data.result.image;
const imageDTO = await getImageDTO(image_name);
assert(imageDTO, "Failed to fetch processor output's image DTO");
// Whew! We made it. Update the layer with the processed image
log.debug({ id, imageDTO }, 'ControlNet image processed');
dispatch(caProcessedImageChanged({ id, imageDTO }));
dispatch(caProcessorPendingBatchIdChanged({ id, batchId: null }));
} catch (error) {
if (signal.aborted) {
// The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
const pendingBatchId = selectCA(getState().canvasV2, id)?.processorPendingBatchId;
if (pendingBatchId) {
cancelProcessorBatch(dispatch, id, pendingBatchId);
}
log.trace('Control Adapter preprocessor cancelled');
} else {
// Some other error condition...
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
if (error instanceof Object) {
if ('data' in error && 'status' in error) {
if (error.status === 403) {
dispatch(caImageChanged({ id, imageDTO: null }));
return;
}
}
}
toast({
id: 'GRAPH_QUEUE_FAILED',
title: t('queue.graphFailedToQueue'),
status: 'error',
});
}
} finally {
req.reset();
}
},
});
};
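
The cancelActiveListeners() call followed by await delay(DEBOUNCE_MS) at the top of the effect above is the standard RTK listener-middleware debounce: each new matching action cancels the previous in-flight effect, so only the last action in a burst survives the delay. A minimal sketch of the pattern in isolation (the matcher and timing are illustrative):

startAppListening({
  matcher,
  effect: async (action, { cancelActiveListeners, delay }) => {
    // Cancel any previous in-flight run of this effect.
    cancelActiveListeners();
    // If this run is itself cancelled while waiting, delay() rejects and
    // execution stops here.
    await delay(300);
    // Debounced work: runs at most once per burst of matching actions.
  },
});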

@@ -1,145 +0,0 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
import { blobToDataURL } from 'features/controlLayers/konva/util';
import { canvasGraphBuilt } from 'features/nodes/store/actions';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildCanvasGraph } from 'features/nodes/util/graph/canvas/buildCanvasGraph';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { ImageDTO } from 'services/api/types';
const log = logger('queue');
/**
* This listener is responsible for invoking the canvas. This involves a number of steps:
*
* 1. Generate image blobs from the canvas layers
* 2. Determine the generation mode from the layers (txt2img, img2img, inpaint)
* 3. Build the canvas graph
* 4. Create the session with the graph
* 5. Upload the init image if necessary
* 6. Upload the mask image if necessary
* 7. Update the init and mask images with the session ID
* 8. Initialize the staging area if not yet initialized
* 9. Dispatch the sessionReadyToInvoke action to invoke the session
*/
export const addEnqueueRequestedCanvasListener = (startAppListening: AppStartListening) => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
enqueueRequested.match(action) && action.payload.tabName === 'canvas',
effect: async (action, { getState, dispatch }) => {
const { prepend } = action.payload;
const state = getState();
const { layerState, boundingBoxCoordinates, boundingBoxDimensions, isMaskEnabled, shouldPreserveMaskedArea } =
state.canvas;
// Build canvas blobs
const canvasBlobsAndImageData = await getCanvasData(
layerState,
boundingBoxCoordinates,
boundingBoxDimensions,
isMaskEnabled,
shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
log.error('Unable to create canvas data');
return;
}
const { baseBlob, baseImageData, maskBlob, maskImageData } = canvasBlobsAndImageData;
// Determine the generation mode
const generationMode = getCanvasGenerationMode(baseImageData, maskImageData);
if (state.system.enableImageDebugging) {
const baseDataURL = await blobToDataURL(baseBlob);
const maskDataURL = await blobToDataURL(maskBlob);
openBase64ImageInTab([
{ base64: maskDataURL, caption: 'mask b64' },
{ base64: baseDataURL, caption: 'image b64' },
]);
}
log.debug(`Generation mode: ${generationMode}`);
// Temp placeholders for the init and mask images
let canvasInitImage: ImageDTO | undefined;
let canvasMaskImage: ImageDTO | undefined;
// For img2img and inpaint/outpaint, we need to upload the init images
if (['img2img', 'inpaint', 'outpaint'].includes(generationMode)) {
// upload the image, saving the request id
canvasInitImage = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([baseBlob], 'canvasInitImage.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: true,
})
).unwrap();
}
// For inpaint/outpaint, we also need to upload the mask layer
if (['inpaint', 'outpaint'].includes(generationMode)) {
// upload the image, saving the request id
canvasMaskImage = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: true,
})
).unwrap();
}
const graph = await buildCanvasGraph(state, generationMode, canvasInitImage, canvasMaskImage);
log.debug({ graph } as SerializableObject, `Canvas graph built`);
// currently this action is just listened to for logging
dispatch(canvasGraphBuilt(graph));
const batchConfig = prepareLinearUIBatch(state, graph, prepend);
try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
const enqueueResult = await req.unwrap();
req.reset();
const batchId = enqueueResult.batch.batch_id as string; // we know this is a string; the backend provides it
// Prep the canvas staging area if it is not yet initialized
if (!state.canvas.layerState.stagingArea.boundingBox) {
dispatch(
stagingAreaInitialized({
boundingBox: {
...state.canvas.boundingBoxCoordinates,
...state.canvas.boundingBoxDimensions,
},
})
);
}
// Associate the session with the canvas session ID
dispatch(canvasBatchIdAdded(batchId));
} catch {
// no-op
}
},
});
};

@@ -1,43 +0,0 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { stagingAreaImageSaved } from 'features/canvas/store/actions';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addStagingAreaImageSavedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: stagingAreaImageSaved,
effect: async (action, { dispatch, getState }) => {
const { imageDTO } = action.payload;
try {
const newImageDTO = await dispatch(
imagesApi.endpoints.changeImageIsIntermediate.initiate({
imageDTO,
is_intermediate: false,
})
).unwrap();
// we may need to add it to the autoadd board
const { autoAddBoardId } = getState().gallery;
if (autoAddBoardId && autoAddBoardId !== 'none') {
await dispatch(
imagesApi.endpoints.addImageToBoard.initiate({
imageDTO: newImageDTO,
board_id: autoAddBoardId,
})
);
}
toast({ id: 'IMAGE_SAVED', title: t('toast.imageSaved'), status: 'success' });
} catch (error) {
toast({
id: 'IMAGE_SAVE_FAILED',
title: t('toast.imageSavingFailed'),
description: (error as Error)?.message,
status: 'error',
});
}
},
});
};

@@ -1,35 +0,0 @@
// https://stackoverflow.com/a/73731908
import { useCallback, useEffect, useState } from 'react';
type UseSingleAndDoubleClickOptions = {
onSingleClick: () => void;
onDoubleClick: () => void;
latency?: number;
};
export function useSingleAndDoubleClick({
onSingleClick,
onDoubleClick,
latency = 250,
}: UseSingleAndDoubleClickOptions): () => void {
const [click, setClick] = useState(0);
useEffect(() => {
const timer = setTimeout(() => {
if (click === 1) {
onSingleClick();
}
setClick(0);
}, latency);
if (click === 2) {
onDoubleClick();
}
return () => clearTimeout(timer);
}, [click, onDoubleClick, latency, onSingleClick]);
const onClick = useCallback(() => setClick((prev) => prev + 1), []);
return onClick;
}
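
A usage sketch for the hook (the component below is hypothetical, for illustration): the hook returns a single click handler; a second click within the latency window triggers onDoubleClick, otherwise onSingleClick fires.

import { useSingleAndDoubleClick } from './useSingleAndDoubleClick'; // hypothetical path

const ExampleButton = () => {
  const onClick = useSingleAndDoubleClick({
    onSingleClick: () => console.log('single click'),
    onDoubleClick: () => console.log('double click'),
    latency: 250, // optional, defaults to 250ms
  });
  return <button onClick={onClick}>Click or double-click</button>;
};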

@@ -1,7 +0,0 @@
export const isInputElement = (el: HTMLElement) => {
return (
el.tagName.toLowerCase() === 'input' ||
el.tagName.toLowerCase() === 'textarea' ||
el.tagName.toLowerCase() === 'select'
);
};
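
A typical use for a guard like this is a global hotkey handler (a sketch using plain DOM APIs; the handler itself is illustrative):

window.addEventListener('keydown', (e) => {
  // Don't fire app shortcuts while the user is typing in a field.
  if (e.target instanceof HTMLElement && isInputElement(e.target)) {
    return;
  }
  // ...handle the shortcut
});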

@@ -1,23 +0,0 @@
type Base64AndCaption = {
base64: string;
caption: string;
};
const openBase64ImageInTab = (images: Base64AndCaption[]) => {
const w = window.open('');
if (!w) {
return;
}
images.forEach((i) => {
const image = new Image();
image.src = i.base64;
w.document.write(i.caption);
w.document.write('<br/>');
w.document.write(image.outerHTML);
w.document.write('<br/><br/>');
});
};
export default openBase64ImageInTab;
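
Usage sketch: the helper expects data URLs, so a Blob must be converted first, e.g. with the blobToDataURL utility imported elsewhere in this commit (the calling context here is illustrative and assumes an async scope):

const dataURL = await blobToDataURL(maskBlob);
openBase64ImageInTab([{ base64: dataURL, caption: 'mask preview' }]);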

@@ -1,30 +0,0 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { entityDeleted } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleBold } from 'react-icons/pi';
export const CanvasEntityDeleteButton = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext();
const onClick = useCallback(() => {
dispatch(entityDeleted({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
return (
<IconButton
size="sm"
colorScheme="error"
aria-label={t('common.delete')}
tooltip={t('common.delete')}
icon={<PiTrashSimpleBold />}
onClick={onClick}
variant="link"
alignSelf="stretch"
/>
);
});
CanvasEntityDeleteButton.displayName = 'CanvasEntityDeleteButton';

@@ -1,18 +0,0 @@
import { IconButton, MenuButton } from '@invoke-ai/ui-library';
import { stopPropagation } from 'common/util/stopPropagation';
import { memo } from 'react';
import { PiDotsThreeVerticalBold } from 'react-icons/pi';
export const CanvasEntityMenuButton = memo(() => {
return (
<MenuButton
as={IconButton}
aria-label="Layer menu"
size="sm"
icon={<PiDotsThreeVerticalBold />}
onDoubleClick={stopPropagation} // double click expands the layer
/>
);
});
CanvasEntityMenuButton.displayName = 'CanvasEntityMenuButton';

@@ -1,25 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { entityReset } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold } from 'react-icons/pi';
export const CanvasEntityMenuItemsReset = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext();
const resetEntity = useCallback(() => {
dispatch(entityReset({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
return (
<MenuItem onClick={resetEntity} icon={<PiArrowCounterClockwiseBold />}>
{t('accessibility.reset')}
</MenuItem>
);
});
CanvasEntityMenuItemsReset.displayName = 'CanvasEntityMenuItemsReset';

@@ -1,161 +0,0 @@
import { CanvasEntity } from 'features/controlLayers/konva/CanvasEntity';
import { CanvasImageRenderer } from 'features/controlLayers/konva/CanvasImage';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasTransformer } from 'features/controlLayers/konva/CanvasTransformer';
import { type CanvasControlAdapterState, isDrawingTool } from 'features/controlLayers/store/types';
import Konva from 'konva';
export class CanvasControlAdapter extends CanvasEntity {
static NAME_PREFIX = 'control-adapter';
static LAYER_NAME = `${CanvasControlAdapter.NAME_PREFIX}_layer`;
static TRANSFORMER_NAME = `${CanvasControlAdapter.NAME_PREFIX}_transformer`;
static GROUP_NAME = `${CanvasControlAdapter.NAME_PREFIX}_group`;
static OBJECT_GROUP_NAME = `${CanvasControlAdapter.NAME_PREFIX}_object-group`;
static TYPE = 'control_adapter' as const;
type = CanvasControlAdapter.TYPE;
_state: CanvasControlAdapterState;
konva: {
layer: Konva.Layer;
group: Konva.Group;
objectGroup: Konva.Group;
};
image: CanvasImageRenderer | null;
transformer: CanvasTransformer;
constructor(state: CanvasControlAdapterState, manager: CanvasManager) {
super(state.id, manager);
this.konva = {
layer: new Konva.Layer({
name: CanvasControlAdapter.LAYER_NAME,
imageSmoothingEnabled: false,
listening: false,
}),
group: new Konva.Group({
name: CanvasControlAdapter.GROUP_NAME,
listening: false,
}),
objectGroup: new Konva.Group({ name: CanvasControlAdapter.OBJECT_GROUP_NAME, listening: false }),
};
this.transformer = new CanvasTransformer(this);
this.konva.group.add(this.konva.objectGroup);
this.konva.layer.add(this.konva.group);
this.konva.layer.add(this.konva.transformer);
this.image = null;
this._state = state;
}
async render(state: CanvasControlAdapterState) {
this._state = state;
// Update the layer's position and listening state
this.konva.group.setAttrs({
x: state.position.x,
y: state.position.y,
scaleX: 1,
scaleY: 1,
});
const imageObject = state.processedImageObject ?? state.imageObject;
let didDraw = false;
if (!imageObject) {
if (this.image) {
this.image.konva.group.visible(false);
didDraw = true;
}
} else if (!this.image) {
this.image = new CanvasImageRenderer(imageObject, this);
this.updateGroup(true);
this.konva.objectGroup.add(this.image.konva.group);
await this.image.updateImageSource(imageObject.image.image_name);
} else if (!this.image.isLoading && !this.image.isError) {
if (await this.image.update(imageObject)) {
didDraw = true;
}
}
this.updateGroup(didDraw);
}
updateGroup(didDraw: boolean) {
this.konva.layer.visible(this._state.isEnabled);
this.konva.group.opacity(this._state.opacity);
const isSelected = this.manager.stateApi.getIsSelected(this.id);
const selectedTool = this.manager.stateApi.getToolState().selected;
if (!this.image?.konva.image) {
// If the layer is totally empty, reset the cache and bail out.
this.konva.layer.listening(false);
this.konva.transformer.nodes([]);
if (this.konva.group.isCached()) {
this.konva.group.clearCache();
}
return;
}
if (isSelected && selectedTool === 'move') {
// When the layer is selected and being moved, we should always cache it.
// We should update the cache if we drew to the layer.
if (!this.konva.group.isCached() || didDraw) {
this.konva.group.cache();
}
// Activate the transformer
this.konva.layer.listening(true);
this.konva.transformer.nodes([this.konva.group]);
this.konva.transformer.forceUpdate();
return;
}
if (isSelected && selectedTool !== 'move') {
// If the layer is selected but not using the move tool, we don't want the layer to be listening.
this.konva.layer.listening(false);
// The transformer also does not need to be active.
this.konva.transformer.nodes([]);
if (isDrawingTool(selectedTool)) {
// We are using a drawing tool (brush, eraser, rect). These tools change the layer's rendered appearance, so we
// should never be cached.
if (this.konva.group.isCached()) {
this.konva.group.clearCache();
}
} else {
// We are using a non-drawing tool (move, view, bbox), so we should cache the layer.
// We should update the cache if we drew to the layer.
if (!this.konva.group.isCached() || didDraw) {
this.konva.group.cache();
}
}
return;
}
if (!isSelected) {
// Unselected layers should not be listening
this.konva.layer.listening(false);
// The transformer also does not need to be active.
this.konva.transformer.nodes([]);
// Update the layer's cache if it's not already cached or we drew to it.
if (!this.konva.group.isCached() || didDraw) {
this.konva.group.cache();
}
return;
}
}
destroy(): void {
this.konva.layer.destroy();
}
repr() {
return {
id: this.id,
type: this.type,
state: this._state,
};
}
}

@@ -1,166 +0,0 @@
import type { RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import {
DENOISE_LATENTS_HRF,
ESRGAN_HRF,
IMAGE_TO_LATENTS_HRF,
LATENTS_TO_IMAGE_HRF_HR,
LATENTS_TO_IMAGE_HRF_LR,
NOISE_HRF,
RESIZE_HRF,
} from 'features/nodes/util/graph/constants';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
/**
* Calculates the new resolution for high-resolution features (HRF) based on base model type.
* Adjusts the width and height to maintain the aspect ratio and constrains them by the model's dimension limits,
* rounding down to the nearest multiple of 8.
*
* @param {number} optimalDimension The optimal dimension for the base model.
* @param {number} width The current width to be adjusted for HRF.
* @param {number} height The current height to be adjusted for HRF.
* @return {{newWidth: number, newHeight: number}} The new width and height, adjusted and rounded as needed.
*/
function calculateHrfRes(
optimalDimension: number,
width: number,
height: number
): { newWidth: number; newHeight: number } {
const aspect = width / height;
const minDimension = Math.floor(optimalDimension * 0.5);
const modelArea = optimalDimension * optimalDimension; // Assuming square images for model_area
let initWidth;
let initHeight;
if (aspect > 1.0) {
initHeight = Math.max(minDimension, Math.sqrt(modelArea / aspect));
initWidth = initHeight * aspect;
} else {
initWidth = Math.max(minDimension, Math.sqrt(modelArea * aspect));
initHeight = initWidth / aspect;
}
// Cap initial height and width to final height and width.
initWidth = Math.min(width, initWidth);
initHeight = Math.min(height, initHeight);
const newWidth = roundToMultiple(Math.floor(initWidth), 8);
const newHeight = roundToMultiple(Math.floor(initHeight), 8);
return { newWidth, newHeight };
}
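
// A worked example, assuming roundToMultiple rounds down (as its source
// module's name suggests): optimalDimension = 512, width = 1024, height = 768
// gives aspect ≈ 1.333 and modelArea = 262144. Since aspect > 1:
//   initHeight = max(256, sqrt(262144 / 1.333)) ≈ 443.4
//   initWidth  ≈ 443.4 * 1.333 ≈ 591.2
// Neither exceeds the requested size, so after flooring and rounding to a
// multiple of 8 the result is { newWidth: 584, newHeight: 440 }.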
/**
* Adds HRF to the graph.
* @param state The root redux state
* @param g The graph to add HRF to
* @param denoise The denoise node
* @param noise The noise node
* @param l2i The l2i node
* @param vaeSource The VAE source node (may be a model loader, VAE loader, or seamless node)
* @returns The HRF image output node.
*/
export const addHRF = (
state: RootState,
g: Graph,
denoise: Invocation<'denoise_latents'>,
noise: Invocation<'noise'>,
l2i: Invocation<'l2i'>,
vaeSource: Invocation<'vae_loader'> | Invocation<'main_model_loader'> | Invocation<'seamless'>
): Invocation<'l2i'> => {
const { hrfStrength, hrfEnabled, hrfMethod } = state.hrf;
const { width, height } = state.canvasV2.document;
const optimalDimension = selectOptimalDimension(state);
const { newWidth: hrfWidth, newHeight: hrfHeight } = calculateHrfRes(optimalDimension, width, height);
// Change height and width of original noise node to initial resolution.
if (noise) {
noise.width = hrfWidth;
noise.height = hrfHeight;
}
// Define new nodes and their connections, roughly in order of operations.
const l2iHrfLR = g.addNode({ type: 'l2i', id: LATENTS_TO_IMAGE_HRF_LR, fp32: l2i.fp32 });
g.addEdge(denoise, 'latents', l2iHrfLR, 'latents');
g.addEdge(vaeSource, 'vae', l2iHrfLR, 'vae');
const resizeHrf = g.addNode({
id: RESIZE_HRF,
type: 'img_resize',
width: width,
height: height,
});
if (hrfMethod === 'ESRGAN') {
let model_name: Invocation<'esrgan'>['model_name'] = 'RealESRGAN_x2plus.pth';
if ((width * height) / (hrfWidth * hrfHeight) > 2) {
model_name = 'RealESRGAN_x4plus.pth';
}
const esrganHrf = g.addNode({ id: ESRGAN_HRF, type: 'esrgan', model_name });
g.addEdge(l2iHrfLR, 'image', esrganHrf, 'image');
g.addEdge(esrganHrf, 'image', resizeHrf, 'image');
} else {
g.addEdge(l2iHrfLR, 'image', resizeHrf, 'image');
}
const noiseHrf = g.addNode({
type: 'noise',
id: NOISE_HRF,
seed: noise.seed,
use_cpu: noise.use_cpu,
});
g.addEdge(resizeHrf, 'height', noiseHrf, 'height');
g.addEdge(resizeHrf, 'width', noiseHrf, 'width');
const i2lHrf = g.addNode({ type: 'i2l', id: IMAGE_TO_LATENTS_HRF });
g.addEdge(vaeSource, 'vae', i2lHrf, 'vae');
g.addEdge(resizeHrf, 'image', i2lHrf, 'image');
const denoiseHrf = g.addNode({
type: 'denoise_latents',
id: DENOISE_LATENTS_HRF,
cfg_scale: denoise.cfg_scale,
scheduler: denoise.scheduler,
steps: denoise.steps,
denoising_start: 1 - hrfStrength,
denoising_end: 1,
});
g.addEdge(i2lHrf, 'latents', denoiseHrf, 'latents');
g.addEdge(noiseHrf, 'noise', denoiseHrf, 'noise');
// Copy edges to the original denoise into the new denoise
g.getEdgesTo(denoise, ['control', 'ip_adapter', 'unet', 'positive_conditioning', 'negative_conditioning']).forEach(
(edge) => {
const clone = deepClone(edge);
clone.destination.node_id = denoiseHrf.id;
g.addEdgeFromObj(clone);
}
);
// The original l2i node is unnecessary now, remove it
g.deleteNode(l2i.id);
const l2iHrfHR = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE_HRF_HR,
fp32: l2i.fp32,
is_intermediate: false,
board: getBoardField(state),
});
g.addEdge(vaeSource, 'vae', l2iHrfHR, 'vae');
g.addEdge(denoiseHrf, 'latents', l2iHrfHR, 'latents');
g.upsertMetadata({
hrf_strength: hrfStrength,
hrf_enabled: hrfEnabled,
hrf_method: hrfMethod,
});
g.setMetadataReceivingNode(l2iHrfHR);
return l2iHrfHR;
};

@@ -1,5 +0,0 @@
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
export const isValidLayer = (layer: CanvasRasterLayerState) => {
return layer.isEnabled && layer.objects.length > 0;
};
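
// Usage sketch (the `rasterLayers` array is illustrative): keep only the
// enabled layers that actually contain objects.
const renderableLayers = rasterLayers.filter(isValidLayer);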

@@ -1,197 +0,0 @@
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import {
LATENTS_TO_IMAGE,
NEGATIVE_CONDITIONING,
NEGATIVE_CONDITIONING_COLLECT,
NOISE,
POSITIVE_CONDITIONING,
POSITIVE_CONDITIONING_COLLECT,
SDXL_CONTROL_LAYERS_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addSDXLLoRAs } from 'features/nodes/util/graph/generation/addSDXLLoRAs';
import { addSDXLRefiner } from 'features/nodes/util/graph/generation/addSDXLRefiner';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
export const buildImageToImageSDXLGraph = async (
state: RootState,
manager: CanvasManager
): Promise<NonNullableGraph> => {
const { bbox, params } = state.canvasV2;
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
shouldUseCpuNoise,
vaePrecision,
vae,
refinerModel,
refinerStart,
img2imgStrength,
} = params;
assert(model, 'No model found in state');
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
const { originalSize, scaledSize } = getSizes(bbox);
const g = new Graph(SDXL_CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
model,
});
const posCond = g.addNode({
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: positiveStylePrompt,
});
const posCondCollect = g.addNode({
type: 'collect',
id: POSITIVE_CONDITIONING_COLLECT,
});
const negCond = g.addNode({
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: negativeStylePrompt,
});
const negCondCollect = g.addNode({
type: 'collect',
id: NEGATIVE_CONDITIONING_COLLECT,
});
const noise = g.addNode({
type: 'noise',
id: NOISE,
seed,
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - img2imgStrength) : 1 - img2imgStrength,
denoising_end: refinerModel ? refinerStart : 1,
});
const l2i = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32: vaePrecision === 'fp32',
board: getBoardField(state),
// This is the terminal node and must always save to gallery.
is_intermediate: false,
use_cache: false,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
type: 'vae_loader',
id: VAE_LOADER,
vae_model: vae,
})
: null;
let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 'clip', negCond, 'clip');
g.addEdge(modelLoader, 'clip2', posCond, 'clip2');
g.addEdge(modelLoader, 'clip2', negCond, 'clip2');
g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
g.addEdge(negCond, 'conditioning', negCondCollect, 'item');
g.addEdge(posCondCollect, 'collection', denoise, 'positive_conditioning');
g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning');
g.addEdge(noise, 'noise', denoise, 'noise');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'sdxl');
g.upsertMetadata({
generation_mode: 'sdxl_img2img',
cfg_scale,
cfg_rescale_multiplier,
width: scaledSize.width,
height: scaledSize.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: Graph.getModelMetadataField(modelConfig),
seed,
steps,
rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
scheduler,
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
vae: vae ?? undefined,
});
const seamless = addSeamless(state, g, denoise, modelLoader, vaeLoader);
addSDXLLoRAs(state, g, denoise, modelLoader, seamless, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefiner(state, g, denoise, seamless, posCond, negCond, l2i);
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
const _addedIPAs = addIPAdapters(state.canvasV2.ipAdapters.entities, g, denoise, modelConfig.base);
const _addedRegions = await addRegions(
manager,
state.canvasV2.regions.entities,
g,
state.canvasV2.document,
state.canvasV2.bbox,
modelConfig.base,
denoise,
posCond,
negCond,
posCondCollect,
negCondCollect
);
if (state.system.shouldUseNSFWChecker) {
imageOutput = addNSFWChecker(g, imageOutput);
}
if (state.system.shouldUseWatermarker) {
imageOutput = addWatermarker(g, imageOutput);
}
g.setMetadataReceivingNode(imageOutput);
return g.getGraph();
};

@@ -1,205 +0,0 @@
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import {
CLIP_SKIP,
CONTROL_LAYERS_GRAPH,
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
NEGATIVE_CONDITIONING,
NEGATIVE_CONDITIONING_COLLECT,
NOISE,
POSITIVE_CONDITIONING,
POSITIVE_CONDITIONING_COLLECT,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
// import { addHRF } from 'features/nodes/util/graph/generation/addHRF';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addLoRAs } from 'features/nodes/util/graph/generation/addLoRAs';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import type { GraphType } from 'features/nodes/util/graph/generation/Graph';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
export const buildTextToImageSD1SD2Graph = async (state: RootState, manager: CanvasManager): Promise<GraphType> => {
const { bbox, params } = state.canvasV2;
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
clipSkip: skipped_layers,
shouldUseCpuNoise,
vaePrecision,
seed,
vae,
} = params;
assert(model, 'No model found in state');
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
const { originalSize, scaledSize } = getSizes(bbox);
const g = new Graph(CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
model,
});
const clipSkip = g.addNode({
type: 'clip_skip',
id: CLIP_SKIP,
skipped_layers,
});
const posCond = g.addNode({
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
});
const posCondCollect = g.addNode({
type: 'collect',
id: POSITIVE_CONDITIONING_COLLECT,
});
const negCond = g.addNode({
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
});
const negCondCollect = g.addNode({
type: 'collect',
id: NEGATIVE_CONDITIONING_COLLECT,
});
const noise = g.addNode({
type: 'noise',
id: NOISE,
seed,
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
type: 'denoise_latents',
id: DENOISE_LATENTS,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: 0,
denoising_end: 1,
});
const l2i = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32: vaePrecision === 'fp32',
board: getBoardField(state),
// This is the terminal node and must always save to gallery.
is_intermediate: false,
use_cache: false,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
type: 'vae_loader',
id: VAE_LOADER,
vae_model: vae,
})
: null;
let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', clipSkip, 'clip');
g.addEdge(clipSkip, 'clip', posCond, 'clip');
g.addEdge(clipSkip, 'clip', negCond, 'clip');
g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
g.addEdge(negCond, 'conditioning', negCondCollect, 'item');
g.addEdge(posCondCollect, 'collection', denoise, 'positive_conditioning');
g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning');
g.addEdge(noise, 'noise', denoise, 'noise');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'sd-1' || modelConfig.base === 'sd-2');
g.upsertMetadata({
generation_mode: 'txt2img',
cfg_scale,
cfg_rescale_multiplier,
width: scaledSize.width,
height: scaledSize.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: Graph.getModelMetadataField(modelConfig),
seed,
steps,
rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
scheduler,
clip_skip: skipped_layers,
vae: vae ?? undefined,
});
const seamless = addSeamless(state, g, denoise, modelLoader, vaeLoader);
addLoRAs(state, g, denoise, modelLoader, seamless, clipSkip, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
if (!isEqual(scaledSize, originalSize)) {
// We are using scaled bbox and need to resize the output image back to the original size.
imageOutput = g.addNode({
id: 'img_resize',
type: 'img_resize',
...originalSize,
is_intermediate: false,
use_cache: false,
});
g.addEdge(l2i, 'image', imageOutput, 'image');
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
const _addedIPAs = addIPAdapters(state.canvasV2.ipAdapters.entities, g, denoise, modelConfig.base);
const _addedRegions = await addRegions(
manager,
state.canvasV2.regions.entities,
g,
state.canvasV2.document,
state.canvasV2.bbox,
modelConfig.base,
denoise,
posCond,
negCond,
posCondCollect,
negCondCollect
);
// const isHRFAllowed = !addedLayers.some((l) => isInitialImageLayer(l) || isRegionalGuidanceLayer(l));
// if (isHRFAllowed && state.hrf.hrfEnabled) {
// imageOutput = addHRF(state, g, denoise, noise, l2i, vaeSource);
// }
if (state.system.shouldUseNSFWChecker) {
imageOutput = addNSFWChecker(g, imageOutput);
}
if (state.system.shouldUseWatermarker) {
imageOutput = addWatermarker(g, imageOutput);
}
g.setMetadataReceivingNode(imageOutput);
return g.getGraph();
};

@@ -1,53 +0,0 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import { useImageSizeContext } from 'features/parameters/components/ImageSize/ImageSizeContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const ParamBoundingBoxHeight = () => {
const { t } = useTranslation();
const ctx = useImageSizeContext();
const isStaging = useAppSelector(isStagingSelector);
const optimalDimension = useAppSelector(selectOptimalDimension);
const sliderMin = useAppSelector((s) => s.config.sd.boundingBoxHeight.sliderMin);
const sliderMax = useAppSelector((s) => s.config.sd.boundingBoxHeight.sliderMax);
const numberInputMin = useAppSelector((s) => s.config.sd.boundingBoxHeight.numberInputMin);
const numberInputMax = useAppSelector((s) => s.config.sd.boundingBoxHeight.numberInputMax);
const coarseStep = useAppSelector((s) => s.config.sd.boundingBoxHeight.coarseStep);
const fineStep = useAppSelector((s) => s.config.sd.boundingBoxHeight.fineStep);
const onChange = useCallback(
(v: number) => {
ctx.heightChanged(v);
},
[ctx]
);
return (
<FormControl isDisabled={isStaging}>
<FormLabel>{t('parameters.height')}</FormLabel>
<CompositeSlider
min={sliderMin}
max={sliderMax}
step={coarseStep}
fineStep={fineStep}
value={ctx.height}
defaultValue={optimalDimension}
onChange={onChange}
marks
/>
<CompositeNumberInput
min={numberInputMin}
max={numberInputMax}
step={coarseStep}
fineStep={fineStep}
value={ctx.height}
defaultValue={optimalDimension}
onChange={onChange}
/>
</FormControl>
);
};
export default memo(ParamBoundingBoxHeight);

@@ -1,53 +0,0 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import { useImageSizeContext } from 'features/parameters/components/ImageSize/ImageSizeContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const ParamBoundingBoxWidth = () => {
const { t } = useTranslation();
const ctx = useImageSizeContext();
const isStaging = useAppSelector(isStagingSelector);
const optimalDimension = useAppSelector(selectOptimalDimension);
const sliderMin = useAppSelector((s) => s.config.sd.boundingBoxWidth.sliderMin);
const sliderMax = useAppSelector((s) => s.config.sd.boundingBoxWidth.sliderMax);
const numberInputMin = useAppSelector((s) => s.config.sd.boundingBoxWidth.numberInputMin);
const numberInputMax = useAppSelector((s) => s.config.sd.boundingBoxWidth.numberInputMax);
const coarseStep = useAppSelector((s) => s.config.sd.boundingBoxWidth.coarseStep);
const fineStep = useAppSelector((s) => s.config.sd.boundingBoxWidth.fineStep);
const onChange = useCallback(
(v: number) => {
ctx.widthChanged(v);
},
[ctx]
);
return (
<FormControl isDisabled={isStaging}>
<FormLabel>{t('parameters.width')}</FormLabel>
<CompositeSlider
min={sliderMin}
max={sliderMax}
step={coarseStep}
fineStep={fineStep}
value={ctx.width}
defaultValue={optimalDimension}
onChange={onChange}
marks
/>
<CompositeNumberInput
min={numberInputMin}
max={numberInputMax}
step={coarseStep}
fineStep={fineStep}
value={ctx.width}
defaultValue={optimalDimension}
onChange={onChange}
/>
</FormControl>
);
};
export default memo(ParamBoundingBoxWidth);

@@ -1,27 +0,0 @@
import { Box } from '@invoke-ai/ui-library';
import { useCanvasGenerationMode } from 'features/canvas/hooks/useCanvasGenerationMode';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
const GenerationModeStatusText = () => {
const generationMode = useCanvasGenerationMode();
const { t } = useTranslation();
const GENERATION_MODE_NAME_MAP = useMemo(
() => ({
txt2img: t('common.txt2img'),
img2img: t('common.img2img'),
inpaint: t('common.inpaint'),
outpaint: t('common.outpaint'),
}),
[t]
);
return (
<Box>
{t('accessibility.mode')}: {generationMode ? GENERATION_MODE_NAME_MAP[generationMode] : '...'}
</Box>
);
};
export default memo(GenerationModeStatusText);

@@ -1,21 +0,0 @@
import { useStore } from '@nanostores/react';
import { $isPreviewVisible } from 'features/controlLayers/store/canvasV2Slice';
import { AspectRatioIconPreview } from 'features/parameters/components/DocumentSize/AspectRatioIconPreview';
import { memo } from 'react';
export const AspectRatioCanvasPreview = memo(() => {
const isPreviewVisible = useStore($isPreviewVisible);
return <AspectRatioIconPreview />;
// if (!isPreviewVisible) {
// return <AspectRatioIconPreview />;
// }
// return (
// <Flex w="full" h="full" alignItems="center" justifyContent="center" position="relative">
// <StageComponent asPreview />
// </Flex>
// );
});
AspectRatioCanvasPreview.displayName = 'AspectRatioCanvasPreview';

@@ -1,75 +0,0 @@
import { useSize } from '@chakra-ui/react-use-size';
import { Flex, Icon } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { AnimatePresence, motion } from 'framer-motion';
import { memo, useMemo, useRef } from 'react';
import { PiFrameCorners } from 'react-icons/pi';
import {
BOX_SIZE_CSS_CALC,
ICON_CONTAINER_STYLES,
ICON_HIGH_CUTOFF,
ICON_LOW_CUTOFF,
MOTION_ICON_ANIMATE,
MOTION_ICON_EXIT,
MOTION_ICON_INITIAL,
} from './constants';
export const AspectRatioIconPreview = memo(() => {
const bbox = useAppSelector((s) => s.canvasV2.bbox);
const containerRef = useRef<HTMLDivElement>(null);
const containerSize = useSize(containerRef);
const shouldShowIcon = useMemo(
() => bbox.aspectRatio.value < ICON_HIGH_CUTOFF && bbox.aspectRatio.value > ICON_LOW_CUTOFF,
[bbox.aspectRatio.value]
);
const { width, height } = useMemo(() => {
if (!containerSize) {
return { width: 0, height: 0 };
}
let width = bbox.rect.width;
let height = bbox.rect.height;
if (bbox.rect.width > bbox.rect.height) {
width = containerSize.width;
height = width / bbox.aspectRatio.value;
} else {
height = containerSize.height;
width = height * bbox.aspectRatio.value;
}
return { width, height };
}, [containerSize, bbox.rect.width, bbox.rect.height, bbox.aspectRatio.value]);
return (
<Flex w="full" h="full" alignItems="center" justifyContent="center" ref={containerRef}>
<Flex
bg="blackAlpha.400"
borderRadius="base"
width={`${width}px`}
height={`${height}px`}
alignItems="center"
justifyContent="center"
>
<AnimatePresence>
{shouldShowIcon && (
<Flex
as={motion.div}
initial={MOTION_ICON_INITIAL}
animate={MOTION_ICON_ANIMATE}
exit={MOTION_ICON_EXIT}
style={ICON_CONTAINER_STYLES}
>
<Icon as={PiFrameCorners} color="base.700" boxSize={BOX_SIZE_CSS_CALC} />
</Flex>
)}
</AnimatePresence>
</Flex>
</Flex>
);
});
AspectRatioIconPreview.displayName = 'AspectRatioIconPreview';

@@ -1,17 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import { SettingsDeveloperLogIsEnabled } from 'features/system/components/SettingsModal/SettingsDeveloperLogIsEnabled';
import { SettingsDeveloperLogLevel } from 'features/system/components/SettingsModal/SettingsDeveloperLogLevel';
import { SettingsDeveloperLogNamespaces } from 'features/system/components/SettingsModal/SettingsDeveloperLogNamespaces';
import { memo } from 'react';
export const SettingsDeveloperContent = memo(() => {
return (
<Flex flexDir="column" gap={4}>
<SettingsDeveloperLogIsEnabled />
<SettingsDeveloperLogLevel />
<SettingsDeveloperLogNamespaces />
</Flex>
);
});
SettingsDeveloperContent.displayName = 'SettingsDeveloperContent';

@@ -1,35 +0,0 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { isLogLevel, zLogLevel } from 'app/logging/logger';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { logLevelChanged } from 'features/system/store/systemSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
export const SettingsLogLevelSelect = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const logLevel = useAppSelector((s) => s.system.logLevel);
const logIsEnabled = useAppSelector((s) => s.system.logIsEnabled);
const options = useMemo(() => zLogLevel.options.map((o) => ({ label: o, value: o })), []);
const value = useMemo(() => options.find((o) => o.value === logLevel), [logLevel, options]);
const onChange = useCallback<ComboboxOnChange>(
(v) => {
if (!isLogLevel(v?.value)) {
return;
}
dispatch(logLevelChanged(v.value));
},
[dispatch]
);
return (
<FormControl isDisabled={!logIsEnabled}>
<FormLabel>{t('common.loglevel')}</FormLabel>
<Combobox value={value} options={options} onChange={onChange} />
</FormControl>
);
});
SettingsLogLevelSelect.displayName = 'SettingsLogLevelSelect';

@@ -1,13 +0,0 @@
import { Box } from '@invoke-ai/ui-library';
import { CanvasEditor } from 'features/controlLayers/components/ControlLayersEditor';
import { memo } from 'react';
const TextToImageTab = () => {
return (
<Box layerStyle="first" position="relative" w="full" h="full" p={2} borderRadius="base">
<CanvasEditor />
</Box>
);
};
export default memo(TextToImageTab);

@@ -1,25 +0,0 @@
import { Box } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
const UpscalingTab = () => {
const activeTabName = useAppSelector(activeTabNameSelector);
return (
<Box
display={activeTabName === 'upscaling' ? undefined : 'none'}
hidden={activeTabName !== 'upscaling'}
layerStyle="first"
position="relative"
w="full"
h="full"
p={2}
borderRadius="base"
>
{/* <ImageViewer /> */}
</Box>
);
};
export default memo(UpscalingTab);

@@ -1,20 +0,0 @@
import { skipToken } from '@reduxjs/toolkit/query';
import { useGetModelConfigQuery } from 'services/api/endpoints/models';
import type { AnyModelConfig } from 'services/api/types';
export const useGetModelConfigWithTypeGuard = <T extends AnyModelConfig>(
key: string | typeof skipToken,
typeGuard: (config: AnyModelConfig) => config is T
) => {
const result = useGetModelConfigQuery(key ?? skipToken, {
selectFromResult: (result) => {
const modelConfig = result.currentData;
return {
...result,
modelConfig: modelConfig && typeGuard(modelConfig) ? modelConfig : undefined,
};
},
});
return result;
};
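
A usage sketch, reusing the isNonRefinerMainModelConfig type guard imported by the graph builders above (the surrounding component and the modelKey variable are hypothetical):

import { skipToken } from '@reduxjs/toolkit/query';
import { isNonRefinerMainModelConfig } from 'services/api/types';

// Inside a component, with modelKey: string | undefined:
const { modelConfig, isLoading } = useGetModelConfigWithTypeGuard(
  modelKey ?? skipToken,
  isNonRefinerMainModelConfig
);
// modelConfig is narrowed to a non-refiner main model config, or undefined
// while loading or when the guard rejects the fetched config.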

@@ -1,35 +1,34 @@
 import type { S } from 'services/api/types';
-export type ModelLoadStartedEvent = S['ModelLoadStartedEvent'];
+type ModelLoadStartedEvent = S['ModelLoadStartedEvent'];
-export type ModelLoadCompleteEvent = S['ModelLoadCompleteEvent'];
+type ModelLoadCompleteEvent = S['ModelLoadCompleteEvent'];
-export type InvocationStartedEvent = S['InvocationStartedEvent'];
+type InvocationStartedEvent = S['InvocationStartedEvent'];
-export type InvocationDenoiseProgressEvent = S['InvocationDenoiseProgressEvent'];
+type InvocationDenoiseProgressEvent = S['InvocationDenoiseProgressEvent'];
-export type InvocationCompleteEvent = S['InvocationCompleteEvent'];
+type InvocationCompleteEvent = S['InvocationCompleteEvent'];
-export type InvocationErrorEvent = S['InvocationErrorEvent'];
+type InvocationErrorEvent = S['InvocationErrorEvent'];
-export type ProgressImage = InvocationDenoiseProgressEvent['progress_image'];
-export type ModelInstallDownloadStartedEvent = S['ModelInstallDownloadStartedEvent'];
+type ModelInstallDownloadStartedEvent = S['ModelInstallDownloadStartedEvent'];
-export type ModelInstallDownloadProgressEvent = S['ModelInstallDownloadProgressEvent'];
+type ModelInstallDownloadProgressEvent = S['ModelInstallDownloadProgressEvent'];
-export type ModelInstallDownloadsCompleteEvent = S['ModelInstallDownloadsCompleteEvent'];
+type ModelInstallDownloadsCompleteEvent = S['ModelInstallDownloadsCompleteEvent'];
-export type ModelInstallCompleteEvent = S['ModelInstallCompleteEvent'];
+type ModelInstallCompleteEvent = S['ModelInstallCompleteEvent'];
-export type ModelInstallErrorEvent = S['ModelInstallErrorEvent'];
+type ModelInstallErrorEvent = S['ModelInstallErrorEvent'];
-export type ModelInstallStartedEvent = S['ModelInstallStartedEvent'];
+type ModelInstallStartedEvent = S['ModelInstallStartedEvent'];
-export type ModelInstallCancelledEvent = S['ModelInstallCancelledEvent'];
+type ModelInstallCancelledEvent = S['ModelInstallCancelledEvent'];
-export type DownloadStartedEvent = S['DownloadStartedEvent'];
+type DownloadStartedEvent = S['DownloadStartedEvent'];
-export type DownloadProgressEvent = S['DownloadProgressEvent'];
+type DownloadProgressEvent = S['DownloadProgressEvent'];
-export type DownloadCompleteEvent = S['DownloadCompleteEvent'];
+type DownloadCompleteEvent = S['DownloadCompleteEvent'];
-export type DownloadCancelledEvent = S['DownloadCancelledEvent'];
+type DownloadCancelledEvent = S['DownloadCancelledEvent'];
-export type DownloadErrorEvent = S['DownloadErrorEvent'];
+type DownloadErrorEvent = S['DownloadErrorEvent'];
-export type QueueItemStatusChangedEvent = S['QueueItemStatusChangedEvent'];
+type QueueItemStatusChangedEvent = S['QueueItemStatusChangedEvent'];
-export type QueueClearedEvent = S['QueueClearedEvent'];
+type QueueClearedEvent = S['QueueClearedEvent'];
-export type BatchEnqueuedEvent = S['BatchEnqueuedEvent'];
+type BatchEnqueuedEvent = S['BatchEnqueuedEvent'];
-export type BulkDownloadStartedEvent = S['BulkDownloadStartedEvent'];
+type BulkDownloadStartedEvent = S['BulkDownloadStartedEvent'];
-export type BulkDownloadCompleteEvent = S['BulkDownloadCompleteEvent'];
+type BulkDownloadCompleteEvent = S['BulkDownloadCompleteEvent'];
-export type BulkDownloadFailedEvent = S['BulkDownloadErrorEvent'];
+type BulkDownloadFailedEvent = S['BulkDownloadErrorEvent'];
 type ClientEmitSubscribeQueue = {
   queue_id: string;