refactor(ui): metadata recall (wip)

just enough to let the app run
psychedelicious 2024-06-16 12:24:06 +10:00
parent 29413f20a7
commit d6bd1e4a49
9 changed files with 231 additions and 225 deletions

View File

@@ -27,7 +27,8 @@ export type LoggerNamespace =
| 'session'
| 'queue'
| 'dnd'
| 'controlLayers';
| 'controlLayers'
| 'metadata';
export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });
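For context, a minimal usage sketch (not part of this commit) of the new namespace: the recall code later in this diff obtains a scoped child logger with it.

import { logger } from 'app/logging/logger';

// Child logger scoped to the new 'metadata' namespace; used in the recall path for errors.
const log = logger('metadata');
log.error('Unknown object type');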

View File

@@ -1,7 +1,6 @@
import type { TypedStartListening } from '@reduxjs/toolkit';
import { createListenerMiddleware } from '@reduxjs/toolkit';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addCommitStagingAreaImageListener } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
@@ -9,17 +8,7 @@ import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddlewar
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addCanvasCopiedToClipboardListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard';
import { addCanvasDownloadedAsImageListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage';
import { addCanvasImageToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet';
import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery';
import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet';
import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged';
import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery';
import { addControlAdapterPreprocessor } from 'app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor';
import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess';
import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed';
import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
@@ -46,7 +35,6 @@ import { addInvocationStartedEventListener } from 'app/store/middleware/listener
import { addModelInstallEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall';
import { addModelLoadEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad';
import { addSocketQueueItemStatusChangedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged';
import { addStagingAreaImageSavedListener } from 'app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved';
import { addUpdateAllNodesRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested';
import { addWorkflowLoadRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested';
import type { AppDispatch, RootState } from 'app/store/store';
@@ -83,7 +71,6 @@ addGalleryImageClickedListener(startAppListening);
addGalleryOffsetChangedListener(startAppListening);
// User Invoked
addEnqueueRequestedCanvasListener(startAppListening);
addEnqueueRequestedNodes(startAppListening);
addEnqueueRequestedLinear(startAppListening);
addEnqueueRequestedUpscale(startAppListening);
@@ -91,15 +78,15 @@ addAnyEnqueuedListener(startAppListening);
addBatchEnqueuedListener(startAppListening);
// Canvas actions
addCanvasSavedToGalleryListener(startAppListening);
addCanvasMaskSavedToGalleryListener(startAppListening);
addCanvasImageToControlNetListener(startAppListening);
addCanvasMaskToControlNetListener(startAppListening);
addCanvasDownloadedAsImageListener(startAppListening);
addCanvasCopiedToClipboardListener(startAppListening);
addCanvasMergedListener(startAppListening);
addStagingAreaImageSavedListener(startAppListening);
addCommitStagingAreaImageListener(startAppListening);
// addCanvasSavedToGalleryListener(startAppListening);
// addCanvasMaskSavedToGalleryListener(startAppListening);
// addCanvasImageToControlNetListener(startAppListening);
// addCanvasMaskToControlNetListener(startAppListening);
// addCanvasDownloadedAsImageListener(startAppListening);
// addCanvasCopiedToClipboardListener(startAppListening);
// addCanvasMergedListener(startAppListening);
// addStagingAreaImageSavedListener(startAppListening);
// addCommitStagingAreaImageListener(startAppListening);
// Socket.IO
addGeneratorProgressEventListener(startAppListening);
@@ -113,10 +100,6 @@ addModelInstallEventListener(startAppListening);
addSocketQueueItemStatusChangedEventListener(startAppListening);
addBulkDownloadListeners(startAppListening);
// ControlNet
addControlNetImageProcessedListener(startAppListening);
addControlNetAutoProcessListener(startAppListening);
// Boards
addImageAddedToBoardFulfilledListener(startAppListening);
addImageRemovedFromBoardFulfilledListener(startAppListening);

View File

@@ -44,15 +44,14 @@ export const RASTER_LAYER_IMAGE_NAME = 'raster_layer.image';
export const INPAINT_MASK_LAYER_NAME = 'inpaint_mask_layer';
// Getters for non-singleton layer and object IDs
export const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
export const getRasterLayerId = (layerId: string) => `${RASTER_LAYER_NAME}_${layerId}`;
export const getBrushLineId = (layerId: string, lineId: string) => `${layerId}.brush_line_${lineId}`;
export const getEraserLineId = (layerId: string, lineId: string) => `${layerId}.eraser_line_${lineId}`;
export const getRectShapeId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getImageObjectId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
export const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIILayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;
export const getRGId = (entityId: string) => `${RG_LAYER_NAME}_${entityId}`;
export const getLayerId = (entityId: string) => `${RASTER_LAYER_NAME}_${entityId}`;
export const getBrushLineId = (entityId: string, lineId: string) => `${entityId}.brush_line_${lineId}`;
export const getEraserLineId = (entityId: string, lineId: string) => `${entityId}.eraser_line_${lineId}`;
export const getRectShapeId = (entityId: string, rectId: string) => `${entityId}.rect_${rectId}`;
export const getImageObjectId = (entityId: string, imageName: string) => `${entityId}.image_${imageName}`;
export const getObjectGroupId = (entityId: string, groupId: string) => `${entityId}.objectGroup_${groupId}`;
export const getLayerBboxId = (entityId: string) => `${entityId}.bbox`;
export const getCAId = (entityId: string) => `control_adapter_${entityId}`;
export const getCAImageId = (entityId: string, imageName: string) => `${entityId}.image_${imageName}`;
export const getIPAId = (entityId: string) => `ip_adapter_${entityId}`;
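A brief usage sketch of the renamed ID helpers (the image name below is hypothetical): entity IDs are now built from an entity-kind prefix plus a UUID, while object IDs stay scoped to their parent entity.

import { v4 as uuidv4 } from 'uuid';
import { getCAId, getImageObjectId, getLayerId } from 'features/controlLayers/konva/naming';

// Entity IDs: `${RASTER_LAYER_NAME}_<uuid>` and `control_adapter_<uuid>`.
const layerId = getLayerId(uuidv4());
const caId = getCAId(uuidv4());

// Object IDs are scoped to the parent entity: `<entityId>.image_<imageName>`.
const imageObjectId = getImageObjectId(layerId, 'example_image.png');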

View File

@@ -1,5 +1,5 @@
import { LightnessToAlphaFilter } from 'features/controlLayers/konva/filters';
import { CA_LAYER_IMAGE_NAME, CA_LAYER_NAME, getCALayerImageId } from 'features/controlLayers/konva/naming';
import { CA_LAYER_IMAGE_NAME, CA_LAYER_NAME, getCAImageId } from 'features/controlLayers/konva/naming';
import type { ControlAdapterData } from 'features/controlLayers/store/types';
import Konva from 'konva';
import type { ImageDTO } from 'services/api/types';
@@ -61,7 +61,7 @@ const updateCALayerImageSource = async (
return;
}
const imageEl = new Image();
const imageId = getCALayerImageId(ca.id, imageName);
const imageId = getCAImageId(ca.id, imageName);
imageEl.onload = () => {
// Find the existing image or create a new one - must find using the name, bc the id may have just changed
const konvaImage =
@@ -144,7 +144,7 @@ export const renderCALayer = (
if (canvasImageSource instanceof HTMLImageElement) {
const image = ca.processedImage ?? ca.image;
if (image && canvasImageSource.id !== getCALayerImageId(ca.id, image.name)) {
if (image && canvasImageSource.id !== getCAImageId(ca.id, image.name)) {
imageSourceNeedsUpdate = true;
} else if (!image) {
imageSourceNeedsUpdate = true;

View File

@@ -574,7 +574,7 @@ const zRect = z.object({
height: z.number().min(1),
});
const zLayerData = z.object({
export const zLayerData = z.object({
id: zId,
type: z.literal('layer'),
isEnabled: z.boolean(),
@@ -587,7 +587,7 @@ const zLayerData = z.object({
});
export type LayerData = z.infer<typeof zLayerData>;
const zIPAdapterData = z.object({
export const zIPAdapterData = z.object({
id: zId,
type: z.literal('ip_adapter'),
isEnabled: z.boolean(),
@@ -637,7 +637,7 @@ const zMaskObject = z
})
.pipe(z.discriminatedUnion('type', [zBrushLine, zEraserline, zRectShape]));
const zRegionalGuidanceData = z.object({
export const zRegionalGuidanceData = z.object({
id: zId,
type: z.literal('regional_guidance'),
isEnabled: z.boolean(),
@@ -709,7 +709,7 @@ const zT2IAdapterData = zControlAdapterDataBase.extend({
});
export type T2IAdapterData = z.infer<typeof zT2IAdapterData>;
const zControlAdapterData = z.discriminatedUnion('adapterType', [zControlNetData, zT2IAdapterData]);
export const zControlAdapterData = z.discriminatedUnion('adapterType', [zControlNetData, zT2IAdapterData]);
export type ControlAdapterData = z.infer<typeof zControlAdapterData>;
export type ControlNetConfig = Pick<
ControlNetData,

View File

@@ -1,12 +1,5 @@
import {
initialControlNet,
initialIPAdapter,
initialT2IAdapter,
} from 'features/controlAdapters/util/buildControlAdapter';
import { buildControlAdapterProcessor } from 'features/controlAdapters/util/buildControlAdapterProcessor';
import { getCALayerId, getIPALayerId, INITIAL_IMAGE_LAYER_ID } from 'features/controlLayers/konva/naming';
import type { ControlAdapterLayer, InitialImageLayer, IPAdapterLayer, LayerData } from 'features/controlLayers/store/types';
import { zLayer } from 'features/controlLayers/store/types';
import { getCAId, getImageObjectId, getIPAId, getLayerId } from 'features/controlLayers/konva/naming';
import type { ControlAdapterData, IPAdapterData, LayerData } from 'features/controlLayers/store/types';
import {
CA_PROCESSOR_DATA,
imageDTOToImageWithDims,
@@ -14,7 +7,8 @@ import {
initialIPAdapterV2,
initialT2IAdapterV2,
isProcessorTypeV2,
} from 'features/controlLayers/util/controlAdapters';
zLayerData,
} from 'features/controlLayers/store/types';
import type { LoRA } from 'features/lora/store/loraSlice';
import { defaultLoRAConfig } from 'features/lora/store/loraSlice';
import type {
@@ -431,7 +425,7 @@ const parseAllIPAdapters: MetadataParseFunc<IPAdapterConfigMetadata[]> = async (
};
//#region Control Layers
const parseLayer: MetadataParseFunc<LayerData> = async (metadataItem) => zLayer.parseAsync(metadataItem);
const parseLayer: MetadataParseFunc<LayerData> = async (metadataItem) => zLayerData.parseAsync(metadataItem);
const parseLayers: MetadataParseFunc<LayerData[]> = async (metadata) => {
// We need to support recalling pre-Control Layers metadata into Control Layers. A separate set of parsers handles
@@ -459,7 +453,7 @@ const parseLayers: MetadataParseFunc<LayerData[]> = async (metadata) => {
controlNetsRaw.map(async (cn) => await parseControlNetToControlAdapterLayer(cn))
);
const controlNetsAsLayers = controlNetsParseResults
.filter((result): result is PromiseFulfilledResult<ControlAdapterLayer> => result.status === 'fulfilled')
.filter((result): result is PromiseFulfilledResult<ControlAdapterData> => result.status === 'fulfilled')
.map((result) => result.value);
layers.push(...controlNetsAsLayers);
} catch {
@@ -472,7 +466,7 @@ const parseLayers: MetadataParseFunc<LayerData[]> = async (metadata) => {
t2iAdaptersRaw.map(async (cn) => await parseT2IAdapterToControlAdapterLayer(cn))
);
const t2iAdaptersAsLayers = t2iAdaptersParseResults
.filter((result): result is PromiseFulfilledResult<ControlAdapterLayer> => result.status === 'fulfilled')
.filter((result): result is PromiseFulfilledResult<ControlAdapterData> => result.status === 'fulfilled')
.map((result) => result.value);
layers.push(...t2iAdaptersAsLayers);
} catch {
@@ -485,7 +479,7 @@ const parseLayers: MetadataParseFunc<LayerData[]> = async (metadata) => {
ipAdaptersRaw.map(async (cn) => await parseIPAdapterToIPAdapterLayer(cn))
);
const ipAdaptersAsLayers = ipAdaptersParseResults
.filter((result): result is PromiseFulfilledResult<IPAdapterLayer> => result.status === 'fulfilled')
.filter((result): result is PromiseFulfilledResult<IPAdapterData> => result.status === 'fulfilled')
.map((result) => result.value);
layers.push(...ipAdaptersAsLayers);
} catch {
@@ -505,28 +499,38 @@ const parseLayers: MetadataParseFunc<LayerData[]> = async (metadata) => {
}
};
const parseInitialImageToInitialImageLayer: MetadataParseFunc<InitialImageLayer> = async (metadata) => {
const denoisingStrength = await getProperty(metadata, 'strength', isParameterStrength);
const parseInitialImageToInitialImageLayer: MetadataParseFunc<LayerData> = async (metadata) => {
// TODO(psyche): recall denoise strength
// const denoisingStrength = await getProperty(metadata, 'strength', isParameterStrength);
const imageName = await getProperty(metadata, 'init_image', isString);
const imageDTO = await getImageDTO(imageName);
assert(imageDTO, 'ImageDTO is null');
const layer: InitialImageLayer = {
id: INITIAL_IMAGE_LAYER_ID,
type: 'initial_image_layer',
const id = getLayerId(uuidv4());
const layer: LayerData = {
id,
type: 'layer',
bbox: null,
bboxNeedsUpdate: true,
x: 0,
y: 0,
isEnabled: true,
opacity: 1,
image: imageDTOToImageWithDims(imageDTO),
isSelected: true,
denoisingStrength,
objects: [
{
type: 'image',
id: getImageObjectId(id, imageDTO.image_name),
width: imageDTO.width,
height: imageDTO.height,
image: imageDTOToImageWithDims(imageDTO),
x: 0,
y: 0,
},
],
};
return layer;
};
const parseControlNetToControlAdapterLayer: MetadataParseFunc<ControlAdapterLayer> = async (metadataItem) => {
const parseControlNetToControlAdapterLayer: MetadataParseFunc<ControlAdapterData> = async (metadataItem) => {
const control_model = await getProperty(metadataItem, 'control_model');
const key = await getModelKey(control_model, 'controlnet');
const controlNetModel = await fetchModelConfigWithTypeGuard(key, isControlNetModelConfig);
@@ -566,35 +570,31 @@ const parseControlNetToControlAdapterLaye
const imageDTO = image ? await getImageDTO(image.image_name) : null;
const processedImageDTO = processedImage ? await getImageDTO(processedImage.image_name) : null;
const layer: ControlAdapterLayer = {
id: getCALayerId(uuidv4()),
const layer: ControlAdapterData = {
id: getCAId(uuidv4()),
type: 'control_adapter',
bbox: null,
bboxNeedsUpdate: true,
isEnabled: true,
isFilterEnabled: true,
isSelected: true,
opacity: 1,
type: 'control_adapter_layer',
filter: 'LightnessToAlphaFilter',
x: 0,
y: 0,
controlAdapter: {
id: uuidv4(),
type: 'controlnet',
model: zModelIdentifierField.parse(controlNetModel),
weight: typeof control_weight === 'number' ? control_weight : initialControlNetV2.weight,
beginEndStepPct,
controlMode: control_mode ?? initialControlNetV2.controlMode,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
processorConfig,
processorPendingBatchId: null,
},
adapterType: 'controlnet',
model: zModelIdentifierField.parse(controlNetModel),
weight: typeof control_weight === 'number' ? control_weight : initialControlNetV2.weight,
beginEndStepPct,
controlMode: control_mode ?? initialControlNetV2.controlMode,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
processorConfig,
processorPendingBatchId: null,
};
return layer;
};
const parseT2IAdapterToControlAdapterLayer: MetadataParseFunc<ControlAdapterLayer> = async (metadataItem) => {
const parseT2IAdapterToControlAdapterLayer: MetadataParseFunc<ControlAdapterData> = async (metadataItem) => {
const t2i_adapter_model = await getProperty(metadataItem, 't2i_adapter_model');
const key = await getModelKey(t2i_adapter_model, 't2i_adapter');
const t2iAdapterModel = await fetchModelConfigWithTypeGuard(key, isT2IAdapterModelConfig);
@@ -631,34 +631,30 @@ const parseT2IAdapterToControlAdapterLaye
const imageDTO = image ? await getImageDTO(image.image_name) : null;
const processedImageDTO = processedImage ? await getImageDTO(processedImage.image_name) : null;
const layer: ControlAdapterLayer = {
id: getCALayerId(uuidv4()),
const layer: ControlAdapterData = {
id: getCAId(uuidv4()),
bbox: null,
bboxNeedsUpdate: true,
isEnabled: true,
isFilterEnabled: true,
isSelected: true,
filter: 'LightnessToAlphaFilter',
opacity: 1,
type: 'control_adapter_layer',
type: 'control_adapter',
x: 0,
y: 0,
controlAdapter: {
id: uuidv4(),
type: 't2i_adapter',
model: zModelIdentifierField.parse(t2iAdapterModel),
weight: typeof weight === 'number' ? weight : initialT2IAdapterV2.weight,
beginEndStepPct,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
processorConfig,
processorPendingBatchId: null,
},
adapterType: 't2i_adapter',
model: zModelIdentifierField.parse(t2iAdapterModel),
weight: typeof weight === 'number' ? weight : initialT2IAdapterV2.weight,
beginEndStepPct,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
processorConfig,
processorPendingBatchId: null,
};
return layer;
};
const parseIPAdapterToIPAdapterLayer: MetadataParseFunc<IPAdapterLayer> = async (metadataItem) => {
const parseIPAdapterToIPAdapterLayer: MetadataParseFunc<IPAdapterData> = async (metadataItem) => {
const ip_adapter_model = await getProperty(metadataItem, 'ip_adapter_model');
const key = await getModelKey(ip_adapter_model, 'ip_adapter');
const ipAdapterModel = await fetchModelConfigWithTypeGuard(key, isIPAdapterModelConfig);
@@ -690,21 +686,16 @@ const parseIPAdapterToIPAdapterLayer: MetadataParseFunc<IPAdapterLayer> = async
];
const imageDTO = image ? await getImageDTO(image.image_name) : null;
const layer: IPAdapterLayer = {
id: getIPALayerId(uuidv4()),
type: 'ip_adapter_layer',
const layer: IPAdapterData = {
id: getIPAId(uuidv4()),
type: 'ip_adapter',
isEnabled: true,
isSelected: true,
ipAdapter: {
id: uuidv4(),
type: 'ip_adapter',
model: zModelIdentifierField.parse(ipAdapterModel),
weight: typeof weight === 'number' ? weight : initialIPAdapterV2.weight,
beginEndStepPct,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
clipVisionModel: initialIPAdapterV2.clipVisionModel, // TODO: This needs to be added to the zIPAdapterField...
method: method ?? initialIPAdapterV2.method,
},
model: zModelIdentifierField.parse(ipAdapterModel),
weight: typeof weight === 'number' ? weight : initialIPAdapterV2.weight,
beginEndStepPct,
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
clipVisionModel: initialIPAdapterV2.clipVisionModel, // TODO: This needs to be added to the zIPAdapterField...
method: method ?? initialIPAdapterV2.method,
};
return layer;
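The parsers in this file share an allSettled-and-filter pattern: parse every raw metadata item independently, keep only the fulfilled results, and discard the rest. A generic sketch of that pattern (a simplification, not code from this commit; the parseAll name is hypothetical):

// Parse each raw item; failures are dropped rather than aborting the whole batch.
const parseAll = async <T>(raw: unknown[], parse: (item: unknown) => Promise<T>): Promise<T[]> => {
  const results = await Promise.allSettled(raw.map((item) => parse(item)));
  return results
    .filter((result): result is PromiseFulfilledResult<T> => result.status === 'fulfilled')
    .map((result) => result.value);
};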

View File

@@ -1,26 +1,48 @@
import { logger } from 'app/logging/logger';
import { getStore } from 'app/store/nanostores/store';
import { deepClone } from 'common/util/deepClone';
import {
controlAdapterRecalled,
controlNetsReset,
ipAdaptersReset,
t2iAdaptersReset,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { getCALayerId, getIPALayerId, getRGLayerId } from 'features/controlLayers/konva/naming';
getBrushLineId,
getCAId,
getEraserLineId,
getImageObjectId,
getIPAId,
getRectShapeId,
getRGId,
} from 'features/controlLayers/konva/naming';
import {
allLayersDeleted,
controlAdapterRecalled,
caRecalled,
heightChanged,
iiLayerRecalled,
ipAdapterRecalled,
ipaRecalled,
layerAllDeleted,
layerRecalled,
negativePrompt2Changed,
negativePromptChanged,
positivePrompt2Changed,
positivePromptChanged,
regionalGuidanceRecalled,
refinerModelChanged,
rgRecalled,
setCfgRescaleMultiplier,
setCfgScale,
setImg2imgStrength,
setRefinerCFGScale,
setRefinerNegativeAestheticScore,
setRefinerPositiveAestheticScore,
setRefinerScheduler,
setRefinerStart,
setRefinerSteps,
setScheduler,
setSeed,
setSteps,
vaeSelected,
widthChanged,
} from 'features/controlLayers/store/canvasV2Slice';
import type { LayerData } from 'features/controlLayers/store/types';
import type {
ControlAdapterData,
IPAdapterData,
LayerData,
RegionalGuidanceData,
} from 'features/controlLayers/store/types';
import { setHrfEnabled, setHrfMethod, setHrfStrength } from 'features/hrf/store/hrfSlice';
import type { LoRA } from 'features/lora/store/loraSlice';
import { loraRecalled, lorasReset } from 'features/lora/store/loraSlice';
@@ -32,15 +54,6 @@ import type {
} from 'features/metadata/types';
import { fetchModelConfigByIdentifier } from 'features/metadata/util/modelFetchingHelpers';
import { modelSelected } from 'features/parameters/store/actions';
import {
setCfgRescaleMultiplier,
setCfgScale,
setImg2imgStrength,
setScheduler,
setSeed,
setSteps,
vaeSelected,
} from 'features/canvas/store/canvasSlice';
import type {
ParameterCFGRescaleMultiplier,
ParameterCFGScale,
@@ -63,15 +76,6 @@ import type {
ParameterVAEModel,
ParameterWidth,
} from 'features/parameters/types/parameterSchemas';
import {
refinerModelChanged,
setRefinerCFGScale,
setRefinerNegativeAestheticScore,
setRefinerPositiveAestheticScore,
setRefinerScheduler,
setRefinerStart,
setRefinerSteps,
} from 'features/sdxl/store/sdxlSlice';
import { getImageDTO } from 'services/api/endpoints/images';
import { v4 as uuidv4 } from 'uuid';
@@ -241,93 +245,122 @@ const recallIPAdapters: MetadataRecallFunc<IPAdapterConfigMetadata[]> = (ipAdapt
});
};
const recallCA: MetadataRecallFunc<ControlAdapterData> = async (ca) => {
const { dispatch } = getStore();
const clone = deepClone(ca);
if (clone.image) {
const imageDTO = await getImageDTO(clone.image.name);
if (!imageDTO) {
clone.image = null;
}
}
if (clone.processedImage) {
const imageDTO = await getImageDTO(clone.processedImage.name);
if (!imageDTO) {
clone.processedImage = null;
}
}
if (clone.model) {
try {
await fetchModelConfigByIdentifier(clone.model);
} catch {
// MODEL SMITED!
clone.model = null;
}
}
// No clobber
clone.id = getCAId(uuidv4());
dispatch(caRecalled({ data: clone }));
return;
};
const recallIPA: MetadataRecallFunc<IPAdapterData> = async (ipa) => {
const { dispatch } = getStore();
const clone = deepClone(ipa);
if (clone.image) {
const imageDTO = await getImageDTO(clone.image.name);
if (!imageDTO) {
clone.image = null;
}
}
if (clone.model) {
try {
await fetchModelConfigByIdentifier(clone.model);
} catch {
// MODEL SMITED!
clone.model = null;
}
}
// No clobber
clone.id = getIPAId(uuidv4());
dispatch(ipaRecalled({ data: clone }));
return;
};
const recallRG: MetadataRecallFunc<RegionalGuidanceData> = async (rg) => {
const { dispatch } = getStore();
const clone = deepClone(rg);
// Strip out the uploaded mask image property - this is an intermediate image
clone.imageCache = null;
for (const ipAdapter of clone.ipAdapters) {
if (ipAdapter.image) {
const imageDTO = await getImageDTO(ipAdapter.image.name);
if (!imageDTO) {
ipAdapter.image = null;
}
}
if (ipAdapter.model) {
try {
await fetchModelConfigByIdentifier(ipAdapter.model);
} catch {
// MODEL SMITED!
ipAdapter.model = null;
}
}
// No clobber
ipAdapter.id = uuidv4();
}
clone.id = getRGId(uuidv4());
dispatch(rgRecalled({ data: clone }));
return;
};
//#region Control Layers
const recallLayer: MetadataRecallFunc<LayerData> = async (layer) => {
const { dispatch } = getStore();
// We need to check for the existence of all images and models when recalling. If they do not exist, SMITE THEM!
// Also, we need fresh IDs for all objects when recalling, to prevent multiple layers with the same ID.
if (layer.type === 'control_adapter_layer') {
const clone = deepClone(layer);
if (clone.controlAdapter.image) {
const imageDTO = await getImageDTO(clone.controlAdapter.image.name);
const clone = deepClone(layer);
const invalidObjects: string[] = [];
for (const obj of clone.objects) {
if (obj.type === 'image') {
const imageDTO = await getImageDTO(obj.image.name);
if (!imageDTO) {
clone.controlAdapter.image = null;
invalidObjects.push(obj.id);
}
}
if (clone.controlAdapter.processedImage) {
const imageDTO = await getImageDTO(clone.controlAdapter.processedImage.name);
if (!imageDTO) {
clone.controlAdapter.processedImage = null;
}
}
if (clone.controlAdapter.model) {
try {
await fetchModelConfigByIdentifier(clone.controlAdapter.model);
} catch {
clone.controlAdapter.model = null;
}
}
clone.id = getCALayerId(uuidv4());
clone.controlAdapter.id = uuidv4();
dispatch(controlAdapterRecalled(clone));
return;
}
if (layer.type === 'ip_adapter_layer') {
const clone = deepClone(layer);
if (clone.ipAdapter.image) {
const imageDTO = await getImageDTO(clone.ipAdapter.image.name);
if (!imageDTO) {
clone.ipAdapter.image = null;
}
clone.objects = clone.objects.filter(({ id }) => !invalidObjects.includes(id));
for (const obj of clone.objects) {
if (obj.type === 'brush_line') {
obj.id = getBrushLineId(clone.id, uuidv4());
} else if (obj.type === 'eraser_line') {
obj.id = getEraserLineId(clone.id, uuidv4());
} else if (obj.type === 'image') {
obj.id = getImageObjectId(clone.id, uuidv4());
} else if (obj.type === 'rect_shape') {
obj.id = getRectShapeId(clone.id, uuidv4());
} else {
logger('metadata').error(`Unknown object type ${obj.type}`);
}
if (clone.ipAdapter.model) {
try {
await fetchModelConfigByIdentifier(clone.ipAdapter.model);
} catch {
clone.ipAdapter.model = null;
}
}
clone.id = getIPALayerId(uuidv4());
clone.ipAdapter.id = uuidv4();
dispatch(ipAdapterRecalled(clone));
return;
}
if (layer.type === 'regional_guidance_layer') {
const clone = deepClone(layer);
// Strip out the uploaded mask image property - this is an intermediate image
clone.uploadedMaskImage = null;
for (const ipAdapter of clone.ipAdapters) {
if (ipAdapter.image) {
const imageDTO = await getImageDTO(ipAdapter.image.name);
if (!imageDTO) {
ipAdapter.image = null;
}
}
if (ipAdapter.model) {
try {
await fetchModelConfigByIdentifier(ipAdapter.model);
} catch {
ipAdapter.model = null;
}
}
ipAdapter.id = uuidv4();
}
clone.id = getRGLayerId(uuidv4());
dispatch(regionalGuidanceRecalled(clone));
return;
}
if (layer.type === 'initial_image_layer') {
dispatch(iiLayerRecalled(layer));
return;
}
clone.id = getRGId(uuidv4());
dispatch(layerRecalled({ data: clone }));
return;
};
const recallLayers: MetadataRecallFunc<LayerData[]> = (layers) => {
const { dispatch } = getStore();
dispatch(allLayersDeleted());
dispatch(layerAllDeleted());
for (const l of layers) {
recallLayer(l);
}
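The recall functions above all follow one pattern: deep-clone the recalled data, null out any image or model reference that no longer resolves, and mint a fresh ID so the recalled entity cannot clobber an existing one. A condensed sketch of that shared shape, assuming the deepClone, getImageDTO, and fetchModelConfigByIdentifier helpers imported in this file (RecallableEntity and recallEntity are hypothetical names):

// Sketch only: validate references, then re-ID before dispatching a recall action.
type RecallableEntity = {
  id: string;
  image: { name: string } | null;
  model: Parameters<typeof fetchModelConfigByIdentifier>[0] | null;
};

const recallEntity = async (data: RecallableEntity, makeId: () => string): Promise<RecallableEntity> => {
  const clone = deepClone(data);
  // Drop image references that no longer resolve to an ImageDTO.
  if (clone.image && !(await getImageDTO(clone.image.name))) {
    clone.image = null;
  }
  // Drop model references that no longer resolve to an installed model config.
  if (clone.model) {
    try {
      await fetchModelConfigByIdentifier(clone.model);
    } catch {
      clone.model = null;
    }
  }
  // Mint a fresh ID so the recalled entity never collides with one already in state.
  clone.id = makeId();
  return clone;
};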

View File

@@ -22,7 +22,7 @@ const validateBaseCompatibility = (base?: BaseModelType, message?: string) => {
if (!base) {
throw new InvalidModelConfigError(message || 'Missing base');
}
const currentBase = getStore().getState().generation.model?.base;
const currentBase = getStore().getState().params.model?.base;
if (currentBase && base !== currentBase) {
throw new InvalidModelConfigError(message || `Incompatible base models: ${base} and ${currentBase}`);
}

View File

@@ -9,8 +9,7 @@ import { getHasMetadata, removeMetadata } from './canvas/metadata';
import { CANVAS_COHERENCE_NOISE, METADATA, NOISE, POSITIVE_CONDITIONING } from './constants';
export const prepareLinearUIBatch = (state: RootState, graph: NonNullableGraph, prepend: boolean): BatchConfig => {
const { iterations, model, shouldRandomizeSeed, seed } = state.canvasV2.params;
const { shouldConcatPrompts } = state.canvasV2;
const { iterations, model, shouldRandomizeSeed, seed, shouldConcatPrompts } = state.canvasV2.params;
const { prompts, seedBehaviour } = state.dynamicPrompts;
const data: Batch['data'] = [];