Merge branch 'main' into psyche/fix/ui/depth-anything-select

commit cdc468a38c
Authored by blessedcoolant on 2024-05-13 03:57:47 +05:30; committed by GitHub.
10 changed files with 148 additions and 218 deletions

View File

@@ -586,13 +586,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
         unet: UNet2DConditionModel,
         scheduler: Scheduler,
     ) -> StableDiffusionGeneratorPipeline:
-        # TODO:
-        # configure_model_padding(
-        #    unet,
-        #    self.seamless,
-        #    self.seamless_axes,
-        # )
-
         class FakeVae:
             class FakeVaeConfig:
                 def __init__(self) -> None:

View File

@@ -4,5 +4,4 @@ Initialization file for invokeai.backend.image_util methods.
 from .infill_methods.patchmatch import PatchMatch # noqa: F401
 from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
-from .seamless import configure_model_padding # noqa: F401
 from .util import InitImageResizer, make_grid # noqa: F401

View File

@@ -1,52 +0,0 @@
-import torch.nn as nn
-
-
-def _conv_forward_asymmetric(self, input, weight, bias):
-    """
-    Patch for Conv2d._conv_forward that supports asymmetric padding
-    """
-    working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
-    working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
-    return nn.functional.conv2d(
-        working,
-        weight,
-        bias,
-        self.stride,
-        nn.modules.utils._pair(0),
-        self.dilation,
-        self.groups,
-    )
-
-
-def configure_model_padding(model, seamless, seamless_axes):
-    """
-    Modifies the 2D convolution layers to use a circular padding mode based on
-    the `seamless` and `seamless_axes` options.
-    """
-    # TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
-    for m in model.modules():
-        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-            if seamless:
-                m.asymmetric_padding_mode = {}
-                m.asymmetric_padding = {}
-                m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
-                m.asymmetric_padding["x"] = (
-                    m._reversed_padding_repeated_twice[0],
-                    m._reversed_padding_repeated_twice[1],
-                    0,
-                    0,
-                )
-                m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
-                m.asymmetric_padding["y"] = (
-                    0,
-                    0,
-                    m._reversed_padding_repeated_twice[2],
-                    m._reversed_padding_repeated_twice[3],
-                )
-                m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
-            else:
-                m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
-                if hasattr(m, "asymmetric_padding_mode"):
-                    del m.asymmetric_padding_mode
-                if hasattr(m, "asymmetric_padding"):
-                    del m.asymmetric_padding
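
The deleted module above patched `Conv2d._conv_forward` so each axis could be padded independently, with circular padding on the seamless axes. What circular padding buys: the convolution reads from the opposite edge of the tensor, so a generated texture tiles with no visible seam. A minimal sketch of that difference, assuming only stock PyTorch (the constant-weight kernel and tiny input are illustrative, not InvokeAI code):

import torch
import torch.nn as nn

# Two identical 3x3 "sum the neighborhood" convs, differing only in padding mode.
zero_conv = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
circ_conv = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False, padding_mode="circular")
nn.init.constant_(zero_conv.weight, 1.0)
nn.init.constant_(circ_conv.weight, 1.0)

x = torch.zeros(1, 1, 4, 4)
x[..., 0] = 1.0  # bright left column

with torch.no_grad():
    print(zero_conv(x)[0, 0, :, -1])  # tensor([0., 0., 0., 0.]) - the right edge sees zero padding
    print(circ_conv(x)[0, 0, :, -1])  # tensor([3., 3., 3., 3.]) - the right edge wraps around to the bright left column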

View File

@@ -1,89 +1,51 @@
+from __future__ import annotations
+
 from contextlib import contextmanager
-from typing import Callable, List, Union
+from typing import Callable, List, Optional, Tuple, Union

+import torch
 import torch.nn as nn
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
+from diffusers.models.lora import LoRACompatibleConv
 from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel


-def _conv_forward_asymmetric(self, input, weight, bias):
-    """
-    Patch for Conv2d._conv_forward that supports asymmetric padding
-    """
-    working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
-    working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
-    return nn.functional.conv2d(
-        working,
-        weight,
-        bias,
-        self.stride,
-        nn.modules.utils._pair(0),
-        self.dilation,
-        self.groups,
-    )
-
-
 @contextmanager
 def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
     if not seamless_axes:
         yield
         return

-    # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
-    to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
+    # override conv_forward
+    # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
+    def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
+        self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
+        self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
+        working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
+        working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
+        return torch.nn.functional.conv2d(
+            working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
+        )
+
+    original_layers: List[Tuple[nn.Conv2d, Callable]] = []

     try:
-        # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
-        skipped_layers = 1
-        for m_name, m in model.named_modules():
-            if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                continue
-
-            if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
-                # down_blocks.1.resnets.1.conv1
-                _, block_num, _, resnet_num, submodule_name = m_name.split(".")
-                block_num = int(block_num)
-                resnet_num = int(resnet_num)
-
-                if block_num >= len(model.down_blocks) - skipped_layers:
-                    continue
-
-                # Skip the second resnet (could be configurable)
-                if resnet_num > 0:
-                    continue
-
-                # Skip Conv2d layers (could be configurable)
-                if submodule_name == "conv2":
-                    continue
-
-            m.asymmetric_padding_mode = {}
-            m.asymmetric_padding = {}
-            m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
-            m.asymmetric_padding["x"] = (
-                m._reversed_padding_repeated_twice[0],
-                m._reversed_padding_repeated_twice[1],
-                0,
-                0,
-            )
-            m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
-            m.asymmetric_padding["y"] = (
-                0,
-                0,
-                m._reversed_padding_repeated_twice[2],
-                m._reversed_padding_repeated_twice[3],
-            )
-            to_restore.append((m, m._conv_forward))
-            m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
+        x_mode = "circular" if "x" in seamless_axes else "constant"
+        y_mode = "circular" if "y" in seamless_axes else "constant"
+
+        conv_layers: List[torch.nn.Conv2d] = []
+
+        for module in model.modules():
+            if isinstance(module, torch.nn.Conv2d):
+                conv_layers.append(module)
+
+        for layer in conv_layers:
+            if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
+                layer.lora_layer = lambda *x: 0
+            original_layers.append((layer, layer._conv_forward))
+            layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)

         yield
     finally:
-        for module, orig_conv_forward in to_restore:
-            module._conv_forward = orig_conv_forward
-            if hasattr(module, "asymmetric_padding_mode"):
-                del module.asymmetric_padding_mode
-            if hasattr(module, "asymmetric_padding"):
-                del module.asymmetric_padding
+        for layer, orig_conv_forward in original_layers:
+            layer._conv_forward = orig_conv_forward
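
The replacement defines the patch inside `set_seamless` so it closes over `x_mode` and `y_mode`, records each layer's original `_conv_forward` in `original_layers`, and restores it in the `finally` block. The per-axis padding sequence it applies, shown as a standalone sketch with stock PyTorch (the tensor sizes and random kernel are arbitrary stand-ins, not values from the diff):

import torch
import torch.nn.functional as F

x = torch.rand(1, 1, 8, 8)
weight = torch.rand(1, 1, 3, 3)

# Seamless on the x axis only: wrap left/right, zero-pad top/bottom.
# F.pad's tuple is (left, right, top, bottom) for the last two dims.
working = F.pad(x, (1, 1, 0, 0), mode="circular")
working = F.pad(working, (0, 0, 1, 1), mode="constant")
out = F.conv2d(working, weight)  # no built-in padding; it was applied above

assert out.shape == x.shape  # same spatial size as a padded conv would give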

View File

@@ -1,13 +1,14 @@
 import { isAnyOf } from '@reduxjs/toolkit';
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
+import type { AppDispatch } from 'app/store/store';
 import { parseify } from 'common/util/serialize';
 import {
   caLayerImageChanged,
-  caLayerIsProcessingImageChanged,
   caLayerModelChanged,
   caLayerProcessedImageChanged,
   caLayerProcessorConfigChanged,
+  caLayerProcessorPendingBatchIdChanged,
   caLayerRecalled,
   isControlAdapterLayer,
 } from 'features/controlLayers/store/controlLayersSlice';
@@ -15,47 +16,39 @@ import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
 import { isImageOutput } from 'features/nodes/types/common';
 import { addToast } from 'features/system/store/systemSlice';
 import { t } from 'i18next';
-import { isEqual } from 'lodash-es';
-import { imagesApi } from 'services/api/endpoints/images';
+import { getImageDTO } from 'services/api/endpoints/images';
 import { queueApi } from 'services/api/endpoints/queue';
-import type { BatchConfig, ImageDTO } from 'services/api/types';
+import type { BatchConfig } from 'services/api/types';
 import { socketInvocationComplete } from 'services/events/actions';
+import { assert } from 'tsafe';

 const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled);

 const DEBOUNCE_MS = 300;
 const log = logger('session');

+/**
+ * Simple helper to cancel a batch and reset the pending batch ID
+ */
+const cancelProcessorBatch = async (dispatch: AppDispatch, layerId: string, batchId: string) => {
+  const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
+  log.trace({ batchId }, 'Cancelling existing preprocessor batch');
+  try {
+    await req.unwrap();
+  } catch {
+    // no-op
+  } finally {
+    req.reset();
+    // Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
+    dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
+  }
+};
+
 export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
   startAppListening({
     matcher,
-    effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take }) => {
+    effect: async (action, { dispatch, getState, cancelActiveListeners, delay, take, signal }) => {
       const layerId = caLayerRecalled.match(action) ? action.payload.id : action.payload.layerId;
-      const precheckLayerOriginal = getOriginalState()
-        .controlLayers.present.layers.filter(isControlAdapterLayer)
-        .find((l) => l.id === layerId);
-      const precheckLayer = getState()
-        .controlLayers.present.layers.filter(isControlAdapterLayer)
-        .find((l) => l.id === layerId);
-
-      // Conditions to bail
-      const layerDoesNotExist = !precheckLayer;
-      const layerHasNoImage = !precheckLayer?.controlAdapter.image;
-      const layerHasNoProcessorConfig = !precheckLayer?.controlAdapter.processorConfig;
-      const layerIsAlreadyProcessingImage = precheckLayer?.controlAdapter.isProcessingImage;
-      const areImageAndProcessorUnchanged =
-        isEqual(precheckLayer?.controlAdapter.image, precheckLayerOriginal?.controlAdapter.image) &&
-        isEqual(precheckLayer?.controlAdapter.processorConfig, precheckLayerOriginal?.controlAdapter.processorConfig);
-
-      if (
-        layerDoesNotExist ||
-        layerHasNoImage ||
-        layerHasNoProcessorConfig ||
-        areImageAndProcessorUnchanged ||
-        layerIsAlreadyProcessingImage
-      ) {
-        return;
-      }
-
       // Cancel any in-progress instances of this listener
       cancelActiveListeners();
@@ -63,19 +56,31 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
       // Delay before starting actual work
       await delay(DEBOUNCE_MS);

-      dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: true }));
-
       // Double-check that we are still eligible for processing
       const state = getState();
       const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
-      const image = layer?.controlAdapter.image;
-      const config = layer?.controlAdapter.processorConfig;

-      // If we have no image or there is no processor config, bail
-      if (!layer || !image || !config) {
+      if (!layer) {
         return;
       }

+      const image = layer.controlAdapter.image;
+      const config = layer.controlAdapter.processorConfig;
+
+      if (!image || !config) {
+        // The user has reset the image or config, so we should clear the processed image
+        dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
+      }
+
+      // At this point, the user has stopped fiddling with the processor settings and there is a processor selected.
+      // If there is a pending processor batch, cancel it.
+      if (layer.controlAdapter.processorPendingBatchId) {
+        cancelProcessorBatch(dispatch, layerId, layer.controlAdapter.processorPendingBatchId);
+      }
+
       // @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error...
       const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config);
       const enqueueBatchArg: BatchConfig = {
@@ -83,7 +88,11 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
         batch: {
           graph: {
             nodes: {
-              [processorNode.id]: { ...processorNode, is_intermediate: true },
+              [processorNode.id]: {
+                ...processorNode,
+                // Control images are always intermediate - do not save to gallery
+                is_intermediate: true,
+              },
             },
             edges: [],
           },
@@ -91,16 +100,21 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
         },
       };

-      try {
-        const req = dispatch(
-          queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
-            fixedCacheKey: 'enqueueBatch',
-          })
-        );
+      // Kick off the processor batch
+      const req = dispatch(
+        queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
+          fixedCacheKey: 'enqueueBatch',
+        })
+      );
+
+      try {
         const enqueueResult = await req.unwrap();
-        req.reset();
+        // TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
+        assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
+        dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: enqueueResult.batch.batch_id }));
         log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));

+        // Wait for the processor node to complete
         const [invocationCompleteAction] = await take(
           (action): action is ReturnType<typeof socketInvocationComplete> =>
             socketInvocationComplete.match(action) &&
@@ -109,31 +123,33 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
         );

         // We still have to check the output type
-        if (isImageOutput(invocationCompleteAction.payload.data.result)) {
+        assert(
+          isImageOutput(invocationCompleteAction.payload.data.result),
+          `Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
+        );
         const { image_name } = invocationCompleteAction.payload.data.result.image;

-          // Wait for the ImageDTO to be received
-          const [{ payload }] = await take(
-            (action) =>
-              imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
-          );
-          const imageDTO = payload as ImageDTO;
-
-          // Whew! We made it. Update the layer with the processed image
-          log.debug({ layerId, imageDTO }, 'ControlNet image processed');
-
-          // Update the processed image in the store
-          dispatch(
-            caLayerProcessedImageChanged({
-              layerId,
-              imageDTO,
-            })
-          );
-          dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
-        }
+        const imageDTO = await getImageDTO(image_name);
+        assert(imageDTO, "Failed to fetch processor output's image DTO");
+
+        log.debug({ layerId, imageDTO }, 'ControlNet image processed');
+        dispatch(caLayerProcessedImageChanged({ layerId, imageDTO }));
+        dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
       } catch (error) {
-        log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
-        dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
+        if (signal.aborted) {
+          // The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
+          const pendingBatchId = getState()
+            .controlLayers.present.layers.filter(isControlAdapterLayer)
+            .find((l) => l.id === layerId)?.controlAdapter.processorPendingBatchId;
+          if (pendingBatchId) {
+            cancelProcessorBatch(dispatch, layerId, pendingBatchId);
+          }
+          log.trace('Control Adapter preprocessor cancelled');
+        } else {
+          // Some other error condition...
+          console.log(error);
+          log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));

         if (error instanceof Object) {
           if ('data' in error && 'status' in error) {
@@ -151,6 +167,9 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
             })
           );
         }
+      } finally {
+        req.reset();
+      }
     },
   });
 };

View File

@@ -124,7 +124,7 @@ export const ControlAdapterImagePreview = memo(
       controlImage &&
       processedControlImage &&
       !isMouseOverImage &&
-      !controlAdapter.isProcessingImage &&
+      !controlAdapter.processorPendingBatchId &&
       controlAdapter.processorConfig !== null;

   useEffect(() => {
@@ -190,7 +190,7 @@ export const ControlAdapterImagePreview = memo(
           />
         </>

-        {controlAdapter.isProcessingImage && (
+        {controlAdapter.processorPendingBatchId !== null && (
           <Flex
             position="absolute"
             top={0}

View File

@@ -27,7 +27,7 @@ import { modelChanged } from 'features/parameters/store/generationSlice';
 import type { ParameterAutoNegative } from 'features/parameters/types/parameterSchemas';
 import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
 import type { IRect, Vector2d } from 'konva/lib/types';
-import { isEqual, partition } from 'lodash-es';
+import { isEqual, partition, unset } from 'lodash-es';
 import { atom } from 'nanostores';
 import type { RgbColor } from 'react-colorful';
 import type { UndoableOptions } from 'redux-undo';
@@ -49,7 +49,7 @@ import type {
 } from './types';

 export const initialControlLayersState: ControlLayersState = {
-  _version: 2,
+  _version: 3,
   selectedLayerId: null,
   brushSize: 100,
   layers: [],
@@ -334,13 +334,13 @@ export const controlLayersSlice = createSlice({
       const layer = selectCALayerOrThrow(state, layerId);
       layer.opacity = opacity;
     },
-    caLayerIsProcessingImageChanged: (
+    caLayerProcessorPendingBatchIdChanged: (
       state,
-      action: PayloadAction<{ layerId: string; isProcessingImage: boolean }>
+      action: PayloadAction<{ layerId: string; batchId: string | null }>
     ) => {
-      const { layerId, isProcessingImage } = action.payload;
+      const { layerId, batchId } = action.payload;
       const layer = selectCALayerOrThrow(state, layerId);
-      layer.controlAdapter.isProcessingImage = isProcessingImage;
+      layer.controlAdapter.processorPendingBatchId = batchId;
     },
     //#endregion

@@ -800,7 +800,7 @@ export const {
   caLayerProcessorConfigChanged,
   caLayerIsFilterEnabledChanged,
   caLayerOpacityChanged,
-  caLayerIsProcessingImageChanged,
+  caLayerProcessorPendingBatchIdChanged,
   // IPA Layers
   ipaLayerAdded,
   ipaLayerRecalled,
@@ -857,7 +857,16 @@ export const selectControlLayersSlice = (state: RootState) => state.controlLayers;

 const migrateControlLayersState = (state: any): any => {
   if (state._version === 1) {
     // Reset state for users on v1 (e.g. beta users), some changes could cause
-    return deepClone(initialControlLayersState);
+    state = deepClone(initialControlLayersState);
+  }
+  if (state._version === 2) {
+    // The CA `isProcessingImage` flag was replaced with a `processorPendingBatchId` property, fix up CA layers
+    for (const layer of (state as ControlLayersState).layers) {
+      if (layer.type === 'control_adapter_layer') {
+        layer.controlAdapter.processorPendingBatchId = null;
+        unset(layer.controlAdapter, 'isProcessingImage');
+      }
+    }
   }
   return state;
 };
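
The migration above is version-gated: each `if` handles one schema version, so state persisted at any older version flows through every applicable step before use. A rough Python paraphrase of the pattern (the dict shapes and `INITIAL_STATE` stub are hypothetical stand-ins for the TS types; the explicit `_version` bump in the v2 step is part of the sketch, added for clarity):

# Hypothetical stand-in for initialControlLayersState.
INITIAL_STATE = {"_version": 3, "selectedLayerId": None, "layers": []}

def migrate_control_layers_state(state: dict) -> dict:
    if state["_version"] == 1:
        # v1 is reset outright; the fresh state is already on the current version.
        state = dict(INITIAL_STATE)
    if state["_version"] == 2:
        # v2 -> v3: the boolean `isProcessingImage` flag became a nullable
        # `processorPendingBatchId`.
        for layer in state["layers"]:
            if layer["type"] == "control_adapter_layer":
                ca = layer["controlAdapter"]
                ca["processorPendingBatchId"] = None
                ca.pop("isProcessingImage", None)
        state["_version"] = 3
    return state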

View File

@@ -113,7 +113,7 @@ export const zLayer = z.discriminatedUnion('type', [
 export type Layer = z.infer<typeof zLayer>;

 export type ControlLayersState = {
-  _version: 2;
+  _version: 3;
   selectedLayerId: string | null;
   layers: Layer[];
   brushSize: number;

View File

@@ -198,8 +198,8 @@ const zControlAdapterBase = z.object({
   weight: z.number().gte(0).lte(1),
   image: zImageWithDims.nullable(),
   processedImage: zImageWithDims.nullable(),
-  isProcessingImage: z.boolean(),
   processorConfig: zProcessorConfig.nullable(),
+  processorPendingBatchId: z.string().nullable().default(null),
   beginEndStepPct: zBeginEndStepPct,
 });
@@ -521,8 +521,8 @@ export const initialControlNetV2: Omit<ControlNetConfigV2, 'id'> = {
   controlMode: 'balanced',
   image: null,
   processedImage: null,
-  isProcessingImage: false,
   processorConfig: CA_PROCESSOR_DATA.canny_image_processor.buildDefaults(),
+  processorPendingBatchId: null,
 };
@@ -532,8 +532,8 @@ export const initialT2IAdapterV2: Omit<T2IAdapterConfigV2, 'id'> = {
   beginEndStepPct: [0, 1],
   image: null,
   processedImage: null,
-  isProcessingImage: false,
   processorConfig: CA_PROCESSOR_DATA.canny_image_processor.buildDefaults(),
+  processorPendingBatchId: null,
 };

 export const initialIPAdapterV2: Omit<IPAdapterConfigV2, 'id'> = {
export const initialIPAdapterV2: Omit<IPAdapterConfigV2, 'id'> = { export const initialIPAdapterV2: Omit<IPAdapterConfigV2, 'id'> = {

View File

@@ -587,7 +587,7 @@ const parseControlNetToControlAdapterLayer: MetadataParseFunc<ControlAdapterLaye
       image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
       processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
       processorConfig,
-      isProcessingImage: false,
+      processorPendingBatchId: null,
     },
   };
@@ -651,7 +651,7 @@ const parseT2IAdapterToControlAdapterLayer: MetadataParseFunc<ControlAdapterLaye
       image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
       processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
       processorConfig,
-      isProcessingImage: false,
+      processorPendingBatchId: null,
     },
   };