feat(nodes,ui): use new metadata nodes for all metadata

psychedelicious
2023-09-23 14:51:39 +10:00
parent 57b0e175da
commit 338081f855
28 changed files with 1466 additions and 2448 deletions

View File

@ -25,6 +25,7 @@ from controlnet_aux import (
from controlnet_aux.util import HWC3, ade_palette
from PIL import Image
from pydantic import BaseModel, Field, validator
from invokeai.app.invocations.metadata import WithMetadata
from invokeai.app.invocations.primitives import ImageField, ImageOutput
@ -38,6 +39,7 @@ from .baseinvocation import (
InputField,
InvocationContext,
OutputField,
WithWorkflow,
invocation,
invocation_output,
)
@ -127,7 +129,7 @@ class ControlNetInvocation(BaseInvocation):
@invocation(
"image_processor", title="Base Image Processor", tags=["controlnet"], category="controlnet", version="1.0.0"
)
class ImageProcessorInvocation(BaseInvocation):
class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Base class for invocations that preprocess images for ControlNet"""
image: ImageField = InputField(description="The image to process")
@ -150,6 +152,7 @@ class ImageProcessorInvocation(BaseInvocation):
session_id=context.graph_execution_state_id,
node_id=self.id,
is_intermediate=self.is_intermediate,
metadata=self.metadata.data if self.metadata else None,
workflow=self.workflow,
)

View File

@ -13,8 +13,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.controlnet_image_processors import ControlField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.model import LoRAModelField
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
from ...version import __version__
@ -27,63 +26,6 @@ class LoRAMetadataField(BaseModelExcludeNull):
weight: float = Field(description="The weight of the LoRA model")
class CoreMetadata(BaseModelExcludeNull):
"""Core generation metadata for an image generated in InvokeAI."""
app_version: str = Field(default=__version__, description="The version of InvokeAI used to generate this image")
generation_mode: str = Field(
description="The generation mode that output this image",
)
created_by: Optional[str] = Field(description="The name of the creator of the image")
positive_prompt: str = Field(description="The positive prompt parameter")
negative_prompt: str = Field(description="The negative prompt parameter")
width: int = Field(description="The width parameter")
height: int = Field(description="The height parameter")
seed: int = Field(description="The seed used for noise generation")
rand_device: str = Field(description="The device used for random number generation")
cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
steps: int = Field(description="The number of steps used for inference")
scheduler: str = Field(description="The scheduler used for inference")
clip_skip: Optional[int] = Field(
default=None,
description="The number of skipped CLIP layers",
)
model: MainModelField = Field(description="The main model used for inference")
controlnets: list[ControlField] = Field(description="The ControlNets used for inference")
loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
vae: Optional[VAEModelField] = Field(
default=None,
description="The VAE used for decoding, if the main model's default was not used",
)
# Latents-to-Latents
strength: Optional[float] = Field(
default=None,
description="The strength used for latents-to-latents",
)
init_image: Optional[str] = Field(default=None, description="The name of the initial image")
# SDXL
positive_style_prompt: Optional[str] = Field(default=None, description="The positive style prompt parameter")
negative_style_prompt: Optional[str] = Field(default=None, description="The negative style prompt parameter")
# SDXL Refiner
refiner_model: Optional[MainModelField] = Field(default=None, description="The SDXL Refiner model used")
refiner_cfg_scale: Optional[float] = Field(
default=None,
description="The classifier-free guidance scale parameter used for the refiner",
)
refiner_steps: Optional[int] = Field(default=None, description="The number of steps used for the refiner")
refiner_scheduler: Optional[str] = Field(default=None, description="The scheduler used for the refiner")
refiner_positive_aesthetic_score: Optional[float] = Field(
default=None, description="The aesthetic score used for the refiner"
)
refiner_negative_aesthetic_score: Optional[float] = Field(
default=None, description="The aesthetic score used for the refiner"
)
refiner_start: Optional[float] = Field(default=None, description="The start value used for refiner denoising")
class ImageMetadata(BaseModelExcludeNull):
"""An image's generation metadata"""
@ -91,97 +33,6 @@ class ImageMetadata(BaseModelExcludeNull):
workflow: Optional[dict] = Field(default=None, description="The workflow associated with the image")
@invocation_output("metadata_accumulator_output")
class MetadataAccumulatorOutput(BaseInvocationOutput):
"""The output of the MetadataAccumulator node"""
metadata: CoreMetadata = OutputField(description="The core metadata for the image")
@invocation(
"metadata_accumulator", title="Metadata Accumulator", tags=["metadata"], category="metadata", version="1.0.0"
)
class MetadataAccumulatorInvocation(BaseInvocation):
"""Outputs a Core Metadata Object"""
generation_mode: str = InputField(
description="The generation mode that output this image",
)
positive_prompt: str = InputField(description="The positive prompt parameter")
negative_prompt: str = InputField(description="The negative prompt parameter")
width: int = InputField(description="The width parameter")
height: int = InputField(description="The height parameter")
seed: int = InputField(description="The seed used for noise generation")
rand_device: str = InputField(description="The device used for random number generation")
cfg_scale: float = InputField(description="The classifier-free guidance scale parameter")
steps: int = InputField(description="The number of steps used for inference")
scheduler: str = InputField(description="The scheduler used for inference")
clip_skip: Optional[int] = Field(
default=None,
description="The number of skipped CLIP layers",
)
model: MainModelField = InputField(description="The main model used for inference")
controlnets: list[ControlField] = InputField(description="The ControlNets used for inference")
loras: list[LoRAMetadataField] = InputField(description="The LoRAs used for inference")
strength: Optional[float] = InputField(
default=None,
description="The strength used for latents-to-latents",
)
init_image: Optional[str] = InputField(
default=None,
description="The name of the initial image",
)
vae: Optional[VAEModelField] = InputField(
default=None,
description="The VAE used for decoding, if the main model's default was not used",
)
# SDXL
positive_style_prompt: Optional[str] = InputField(
default=None,
description="The positive style prompt parameter",
)
negative_style_prompt: Optional[str] = InputField(
default=None,
description="The negative style prompt parameter",
)
# SDXL Refiner
refiner_model: Optional[MainModelField] = InputField(
default=None,
description="The SDXL Refiner model used",
)
refiner_cfg_scale: Optional[float] = InputField(
default=None,
description="The classifier-free guidance scale parameter used for the refiner",
)
refiner_steps: Optional[int] = InputField(
default=None,
description="The number of steps used for the refiner",
)
refiner_scheduler: Optional[str] = InputField(
default=None,
description="The scheduler used for the refiner",
)
refiner_positive_aesthetic_score: Optional[float] = InputField(
default=None,
description="The aesthetic score used for the refiner",
)
refiner_negative_aesthetic_score: Optional[float] = InputField(
default=None,
description="The aesthetic score used for the refiner",
)
refiner_start: Optional[float] = InputField(
default=None,
description="The start value used for refiner denoising",
)
def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput:
"""Collects and outputs a CoreMetadata object"""
return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict()))
class MetadataItem(BaseModel):
label: str = Field(description=FieldDescriptions.metadata_item_label)
value: Any = Field(description=FieldDescriptions.metadata_item_value)
@ -218,20 +69,25 @@ class MetadataDictOutput(BaseInvocationOutput):
@invocation("metadata", title="Metadata", tags=["metadata"], category="metadata", version="1.0.0")
class MetadataInvocation(BaseInvocation):
"""Takes a list of MetadataItems and outputs a MetadataDict."""
"""Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict."""
items: Union[list[MetadataItem], MetadataItem] = InputField(description=FieldDescriptions.metadata_item_polymorphic)
def invoke(self, context: InvocationContext) -> MetadataDictOutput:
if isinstance(self.items, MetadataItem):
return MetadataDictOutput(metadata_dict=(MetadataDict(data={self.items.label: self.items.value})))
# single metadata item
data = {self.items.label: self.items.value}
else:
# collection of metadata items
data = {item.label: item.value for item in self.items}
return MetadataDictOutput(metadata_dict=(MetadataDict(data={item.label: item.value for item in self.items})))
data.update({"app_version": __version__})
return MetadataDictOutput(metadata_dict=(MetadataDict(data=data)))
@invocation("merge_metadata_dict", title="Metadata Merge", tags=["metadata"], category="metadata", version="1.0.0")
class MergeMetadataDictInvocation(BaseInvocation):
"""Takes a list of MetadataItems and outputs a MetadataDict."""
"""Merged a collection of MetadataDict into a single MetadataDict."""
collection: list[MetadataDict] = InputField(description=FieldDescriptions.metadata_dict_collection)
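
For orientation, a minimal behavioural sketch of the two new nodes (plain TypeScript stand-ins, not InvokeAI code): a metadata node collapses its labelled items into a single dict and appends app_version, and merge_metadata_dict folds a collection of dicts into one (the merge behaviour is inferred from its docstring; its invoke body is not shown in this diff).

// Sketch only: mirrors MetadataInvocation.invoke() above in TypeScript.
type MetadataItem = { label: string; value: unknown };
type MetadataDict = Record<string, unknown>;

const toMetadataDict = (
  items: MetadataItem | MetadataItem[],
  appVersion: string
): MetadataDict => {
  const list = Array.isArray(items) ? items : [items];
  const data: MetadataDict = {};
  for (const item of list) {
    data[item.label] = item.value;
  }
  data['app_version'] = appVersion; // the node always stamps the app version
  return data;
};

// Merge behaviour inferred from the MergeMetadataDictInvocation docstring:
const mergeMetadataDicts = (collection: MetadataDict[]): MetadataDict =>
  collection.reduce((merged, dict) => ({ ...merged, ...dict }), {});

// toMetadataDict([{ label: 'seed', value: 123 }], '3.1.0')
//   -> { seed: 123, app_version: '3.1.0' }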

View File

@ -12,7 +12,7 @@ from diffusers.image_processor import VaeImageProcessor
from pydantic import BaseModel, Field, validator
from tqdm import tqdm
from invokeai.app.invocations.metadata import CoreMetadata, WithMetadata
from invokeai.app.invocations.metadata import WithMetadata
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType, ModelType, SubModelType

View File

@ -1252,7 +1252,7 @@ export const isInvocationFieldSchema = (
export type InvocationEdgeExtra = { type: 'default' | 'collapsed' };
const zLoRAMetadataItem = z.object({
export const zLoRAMetadataItem = z.object({
lora: zLoRAModelField.deepPartial(),
weight: z.number(),
});
@ -1279,15 +1279,7 @@ export const zCoreMetadata = z
.nullish()
.catch(null),
controlnets: z.array(zControlField.deepPartial()).nullish().catch(null),
loras: z
.array(
z.object({
lora: zLoRAModelField.deepPartial(),
weight: z.number(),
})
)
.nullish()
.catch(null),
loras: z.array(zLoRAMetadataItem).nullish().catch(null),
vae: zVaeModelField.nullish().catch(null),
strength: z.number().nullish().catch(null),
init_image: z.string().nullish().catch(null),

View File

@ -1,18 +1,16 @@
import { RootState } from 'app/store/store';
import { getValidControlNets } from 'features/controlNet/util/getValidControlNets';
import { omit } from 'lodash-es';
import {
CollectInvocation,
ControlField,
ControlNetInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
import { NonNullableGraph, zControlField } from '../../types/types';
import {
CANVAS_COHERENCE_DENOISE_LATENTS,
CONTROL_NET_COLLECT,
METADATA_ACCUMULATOR,
} from './constants';
import { addMainMetadata } from './metadata';
export const addControlNetToLinearGraph = (
state: RootState,
@ -23,12 +21,9 @@ export const addControlNetToLinearGraph = (
const validControlNets = getValidControlNets(controlNets);
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (isControlNetEnabled && Boolean(validControlNets.length)) {
if (validControlNets.length) {
const controlnets: ControlField[] = [];
// We have multiple controlnets, add ControlNet collector
const controlNetIterateNode: CollectInvocation = {
id: CONTROL_NET_COLLECT,
@ -87,15 +82,7 @@ export const addControlNetToLinearGraph = (
graph.nodes[controlNetNode.id] = controlNetNode as ControlNetInvocation;
if (metadataAccumulator?.controlnets) {
// metadata accumulator only needs a control field - not the whole node
// extract what we need and add to the accumulator
const controlField = omit(controlNetNode, [
'id',
'type',
]) as ControlField;
metadataAccumulator.controlnets.push(controlField);
}
controlnets.push(zControlField.parse(controlNetNode));
graph.edges.push({
source: { node_id: controlNetNode.id, field: 'control' },
@ -115,6 +102,8 @@ export const addControlNetToLinearGraph = (
});
}
});
addMainMetadata(graph, { controlnets });
}
}
};
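
In the graph builders, the old omit(controlNetNode, ['id', 'type']) extraction is replaced by zControlField.parse(controlNetNode). Since z.object() strips unrecognized keys by default, parsing the whole invocation node yields just the control field shape while validating it. A rough sketch with an abbreviated schema (the real zControlField is defined in types.ts and has more fields):

import { z } from 'zod';

// Abbreviated stand-in for zControlField; field names here are illustrative.
const zControlFieldSketch = z.object({
  control_model: z.object({ model_name: z.string(), base_model: z.string() }),
  control_weight: z.number(),
});

const controlNetNode = {
  id: 'control_net_1', // stripped by parse()
  type: 'controlnet',  // stripped by parse()
  control_model: { model_name: 'canny', base_model: 'sd-1' },
  control_weight: 1,
};

// Roughly equivalent to the old omit(controlNetNode, ['id', 'type']),
// but validated against the schema at the same time.
const controlField = zControlFieldSketch.parse(controlNetNode);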

View File

@ -38,15 +38,7 @@ export const addIPAdapterToLinearGraph = (
graph.nodes[ipAdapterNode.id] = ipAdapterNode as IPAdapterInvocation;
// if (metadataAccumulator?.ip_adapters) {
// // metadata accumulator only needs the ip_adapter field - not the whole node
// // extract what we need and add to the accumulator
// const ipAdapterField = omit(ipAdapterNode, [
// 'id',
// 'type',
// ]) as IPAdapterField;
// metadataAccumulator.ip_adapters.push(ipAdapterField);
// }
// TODO: add metadata
graph.edges.push({
source: { node_id: ipAdapterNode.id, field: 'ip_adapter' },

View File

@ -1,21 +1,22 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import {
LoRAMetadataItem,
NonNullableGraph,
zLoRAMetadataItem,
} from 'features/nodes/types/types';
import { forEach, size } from 'lodash-es';
import { LoraLoaderInvocation } from 'services/api/types';
import {
LoraLoaderInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import {
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPAINT_GRAPH,
CANVAS_COHERENCE_DENOISE_LATENTS,
CLIP_SKIP,
LORA_LOADER,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
} from './constants';
import { addMainMetadata } from './metadata';
export const addLoRAsToGraph = (
state: RootState,
@ -33,29 +34,29 @@ export const addLoRAsToGraph = (
const { loras } = state.lora;
const loraCount = size(loras);
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (loraCount > 0) {
// Remove modelLoaderNodeId unet connection to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === modelLoaderNodeId &&
['unet'].includes(e.source.field)
)
);
// Remove CLIP_SKIP connections to conditionings to feed it through LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(e.source.node_id === CLIP_SKIP && ['clip'].includes(e.source.field))
);
if (loraCount === 0) {
return;
}
// Remove modelLoaderNodeId unet connection to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === modelLoaderNodeId &&
['unet'].includes(e.source.field)
)
);
// Remove CLIP_SKIP connections to conditionings to feed it through LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(e.source.node_id === CLIP_SKIP && ['clip'].includes(e.source.field))
);
// we need to remember the last lora so we can chain from it
let lastLoraNodeId = '';
let currentLoraIndex = 0;
const loraMetadata: LoRAMetadataItem[] = [];
forEach(loras, (lora) => {
const { model_name, base_model, weight } = lora;
@ -69,13 +70,12 @@ export const addLoRAsToGraph = (
weight,
};
// add the lora to the metadata accumulator
if (metadataAccumulator?.loras) {
metadataAccumulator.loras.push({
loraMetadata.push(
zLoRAMetadataItem.parse({
lora: { model_name, base_model },
weight,
});
}
})
);
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
@ -182,4 +182,6 @@ export const addLoRAsToGraph = (
lastLoraNodeId = currentLoraNodeId;
currentLoraIndex += 1;
});
addMainMetadata(graph, { loras: loraMetadata });
};

View File

@ -1,14 +1,14 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { forEach, size } from 'lodash-es';
import {
MetadataAccumulatorInvocation,
SDXLLoraLoaderInvocation,
} from 'services/api/types';
LoRAMetadataItem,
NonNullableGraph,
zLoRAMetadataItem,
} from 'features/nodes/types/types';
import { forEach, size } from 'lodash-es';
import { SDXLLoraLoaderInvocation } from 'services/api/types';
import {
CANVAS_COHERENCE_DENOISE_LATENTS,
LORA_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
SDXL_CANVAS_INPAINT_GRAPH,
@ -17,6 +17,7 @@ import {
SDXL_REFINER_INPAINT_CREATE_MASK,
SEAMLESS,
} from './constants';
import { addMainMetadata } from './metadata';
export const addSDXLLoRAsToGraph = (
state: RootState,
@ -34,9 +35,12 @@ export const addSDXLLoRAsToGraph = (
const { loras } = state.lora;
const loraCount = size(loras);
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (loraCount === 0) {
return;
}
const loraMetadata: LoRAMetadataItem[] = [];
// Handle Seamless Plugs
const unetLoaderId = modelLoaderNodeId;
@ -47,22 +51,17 @@ export const addSDXLLoRAsToGraph = (
clipLoaderId = SDXL_MODEL_LOADER;
}
if (loraCount > 0) {
// Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field)
) &&
!(
e.source.node_id === clipLoaderId &&
['clip2'].includes(e.source.field)
)
);
}
// Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field)
) &&
!(e.source.node_id === clipLoaderId && ['clip2'].includes(e.source.field))
);
// we need to remember the last lora so we can chain from it
let lastLoraNodeId = '';
@ -80,16 +79,12 @@ export const addSDXLLoRAsToGraph = (
weight,
};
// add the lora to the metadata accumulator
if (metadataAccumulator) {
if (!metadataAccumulator.loras) {
metadataAccumulator.loras = [];
}
metadataAccumulator.loras.push({
loraMetadata.push(
zLoRAMetadataItem.parse({
lora: { model_name, base_model },
weight,
});
}
})
);
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
@ -242,4 +237,6 @@ export const addSDXLLoRAsToGraph = (
lastLoraNodeId = currentLoraNodeId;
currentLoraIndex += 1;
});
addMainMetadata(graph, { loras: loraMetadata });
};

View File

@ -2,7 +2,6 @@ import { RootState } from 'app/store/store';
import {
CreateDenoiseMaskInvocation,
ImageDTO,
MetadataAccumulatorInvocation,
SeamlessModeInvocation,
} from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
@ -12,7 +11,6 @@ import {
LATENTS_TO_IMAGE,
MASK_COMBINE,
MASK_RESIZE_UP,
METADATA_ACCUMULATOR,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
@ -26,6 +24,7 @@ import {
SDXL_REFINER_SEAMLESS,
} from './constants';
import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt';
import { addMainMetadata } from './metadata';
export const addSDXLRefinerToGraph = (
state: RootState,
@ -57,21 +56,15 @@ export const addSDXLRefinerToGraph = (
return;
}
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (metadataAccumulator) {
metadataAccumulator.refiner_model = refinerModel;
metadataAccumulator.refiner_positive_aesthetic_score =
refinerPositiveAestheticScore;
metadataAccumulator.refiner_negative_aesthetic_score =
refinerNegativeAestheticScore;
metadataAccumulator.refiner_cfg_scale = refinerCFGScale;
metadataAccumulator.refiner_scheduler = refinerScheduler;
metadataAccumulator.refiner_start = refinerStart;
metadataAccumulator.refiner_steps = refinerSteps;
}
addMainMetadata(graph, {
refiner_model: refinerModel,
refiner_positive_aesthetic_score: refinerPositiveAestheticScore,
refiner_negative_aesthetic_score: refinerNegativeAestheticScore,
refiner_cfg_scale: refinerCFGScale,
refiner_scheduler: refinerScheduler,
refiner_start: refinerStart,
refiner_steps: refinerSteps,
});
const modelLoaderId = modelLoaderNodeId
? modelLoaderNodeId

View File

@ -1,18 +1,14 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { SaveImageInvocation } from 'services/api/types';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
SAVE_IMAGE,
WATERMARKER,
} from './constants';
import {
MetadataAccumulatorInvocation,
SaveImageInvocation,
} from 'services/api/types';
import { RootState } from 'app/store/store';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
/**
* Set the `use_cache` field on the linear/canvas graph's final image output node to False.
@ -36,23 +32,6 @@ export const addSaveImageNode = (
graph.nodes[SAVE_IMAGE] = saveImageNode;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: SAVE_IMAGE,
field: 'metadata',
},
});
}
const destination = {
node_id: SAVE_IMAGE,
field: 'image',

View File

@ -1,6 +1,7 @@
import { RootState } from 'app/store/store';
import { SeamlessModeInvocation } from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
import { addMainMetadata } from './metadata';
import {
CANVAS_COHERENCE_DENOISE_LATENTS,
CANVAS_INPAINT_GRAPH,
@ -31,6 +32,11 @@ export const addSeamlessToLinearGraph = (
seamless_y: seamlessYAxis,
} as SeamlessModeInvocation;
addMainMetadata(graph, {
seamless_x: seamlessXAxis,
seamless_y: seamlessYAxis,
});
let denoisingNodeId = DENOISE_LATENTS;
if (

View File

@ -1,6 +1,5 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { MetadataAccumulatorInvocation } from 'services/api/types';
import {
CANVAS_COHERENCE_INPAINT_CREATE_MASK,
CANVAS_IMAGE_TO_IMAGE_GRAPH,
@ -14,7 +13,6 @@ import {
INPAINT_IMAGE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
ONNX_MODEL_LOADER,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
@ -26,6 +24,7 @@ import {
TEXT_TO_IMAGE_GRAPH,
VAE_LOADER,
} from './constants';
import { addMainMetadata } from './metadata';
export const addVAEToGraph = (
state: RootState,
@ -41,9 +40,6 @@ export const addVAEToGraph = (
);
const isAutoVae = !vae;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (!isAutoVae) {
graph.nodes[VAE_LOADER] = {
@ -181,7 +177,7 @@ export const addVAEToGraph = (
}
}
if (vae && metadataAccumulator) {
metadataAccumulator.vae = vae;
if (vae) {
addMainMetadata(graph, { vae });
}
};

View File

@ -5,14 +5,8 @@ import {
ImageNSFWBlurInvocation,
ImageWatermarkInvocation,
LatentsToImageInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NSFW_CHECKER,
WATERMARKER,
} from './constants';
import { LATENTS_TO_IMAGE, NSFW_CHECKER, WATERMARKER } from './constants';
export const addWatermarkerToGraph = (
state: RootState,
@ -32,10 +26,6 @@ export const addWatermarkerToGraph = (
| ImageNSFWBlurInvocation
| undefined;
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
@ -80,17 +70,4 @@ export const addWatermarkerToGraph = (
},
});
}
if (metadataAccumulator) {
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: WATERMARKER,
field: 'metadata',
},
});
}
};

View File

@ -1,12 +1,13 @@
import { BoardId } from 'features/gallery/store/types';
import { NonNullableGraph } from 'features/nodes/types/types';
import { ESRGANModelName } from 'features/parameters/store/postprocessingSlice';
import {
Graph,
ESRGANInvocation,
Graph,
SaveImageInvocation,
} from 'services/api/types';
import { REALESRGAN as ESRGAN, SAVE_IMAGE } from './constants';
import { BoardId } from 'features/gallery/store/types';
import { addMainMetadataNodeToGraph } from './metadata';
type Arg = {
image_name: string;
@ -55,5 +56,9 @@ export const buildAdHocUpscaleGraph = ({
],
};
addMainMetadataNodeToGraph(graph, {
model: esrganModelName,
});
return graph;
};

View File

@ -19,12 +19,12 @@ import {
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SEAMLESS,
} from './constants';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Canvas tab's Image to Image graph.
@ -307,10 +307,7 @@ export const buildCanvasImageToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'img2img',
cfg_scale,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
@ -324,13 +321,10 @@ export const buildCanvasImageToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
strength,
init_image: initialImage.image_name,
};
});
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {

View File

@ -16,7 +16,6 @@ import {
IMAGE_TO_LATENTS,
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -27,6 +26,7 @@ import {
SEAMLESS,
} from './constants';
import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Canvas tab's Image to Image graph.
@ -318,10 +318,7 @@ export const buildCanvasSDXLImageToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'img2img',
cfg_scale,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
@ -335,22 +332,8 @@ export const buildCanvasSDXLImageToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
strength,
init_image: initialImage.image_name,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -17,7 +17,6 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
ONNX_MODEL_LOADER,
@ -29,6 +28,7 @@ import {
SEAMLESS,
} from './constants';
import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Canvas tab's Text to Image graph.
@ -300,10 +300,7 @@ export const buildCanvasSDXLTextToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'txt2img',
cfg_scale,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
@ -317,20 +314,6 @@ export const buildCanvasSDXLTextToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -20,13 +20,13 @@ import {
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
ONNX_MODEL_LOADER,
POSITIVE_CONDITIONING,
SEAMLESS,
} from './constants';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Canvas tab's Text to Image graph.
@ -288,10 +288,7 @@ export const buildCanvasTextToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'txt2img',
cfg_scale,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
@ -305,21 +302,7 @@ export const buildCanvasTextToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -4,13 +4,20 @@ import { generateSeeds } from 'common/util/generateSeeds';
import { NonNullableGraph } from 'features/nodes/types/types';
import { range, unset } from 'lodash-es';
import { components } from 'services/api/schema';
import { Batch, BatchConfig } from 'services/api/types';
import { Batch, BatchConfig, MetadataItemInvocation } from 'services/api/types';
import {
BATCH_PROMPT,
BATCH_SEED,
BATCH_STYLE_PROMPT,
CANVAS_COHERENCE_NOISE,
METADATA_ACCUMULATOR,
NOISE,
POSITIVE_CONDITIONING,
} from './constants';
import {
addBatchMetadataNodeToGraph,
removeMetadataFromMainMetadataNode,
} from './metadata';
export const prepareLinearUIBatch = (
state: RootState,
@ -23,8 +30,27 @@ export const prepareLinearUIBatch = (
const data: Batch['data'] = [];
const seedMetadataItemNode: MetadataItemInvocation = {
id: BATCH_SEED,
type: 'metadata_item',
label: 'seed',
};
const promptMetadataItemNode: MetadataItemInvocation = {
id: BATCH_PROMPT,
type: 'metadata_item',
label: 'positive_prompt',
};
const stylePromptMetadataItemNode: MetadataItemInvocation = {
id: BATCH_STYLE_PROMPT,
type: 'metadata_item',
label: 'positive_style_prompt',
};
const itemNodesIds: string[] = [];
if (prompts.length === 1) {
unset(graph.nodes[METADATA_ACCUMULATOR], 'seed');
const seeds = generateSeeds({
count: iterations,
start: shouldRandomizeSeed ? undefined : seed,
@ -40,13 +66,15 @@ export const prepareLinearUIBatch = (
});
}
if (graph.nodes[METADATA_ACCUMULATOR]) {
zipped.push({
node_path: METADATA_ACCUMULATOR,
field_name: 'seed',
items: seeds,
});
}
// add to metadata
removeMetadataFromMainMetadataNode(graph, 'seed');
itemNodesIds.push(BATCH_SEED);
graph.nodes[BATCH_SEED] = seedMetadataItemNode;
zipped.push({
node_path: BATCH_SEED,
field_name: 'value',
items: seeds,
});
if (graph.nodes[CANVAS_COHERENCE_NOISE]) {
zipped.push({
@ -77,13 +105,15 @@ export const prepareLinearUIBatch = (
});
}
if (graph.nodes[METADATA_ACCUMULATOR]) {
firstBatchDatumList.push({
node_path: METADATA_ACCUMULATOR,
field_name: 'seed',
items: seeds,
});
}
// add to metadata
removeMetadataFromMainMetadataNode(graph, 'seed');
itemNodesIds.push(BATCH_SEED);
graph.nodes[BATCH_SEED] = seedMetadataItemNode;
firstBatchDatumList.push({
node_path: BATCH_SEED,
field_name: 'value',
items: seeds,
});
if (graph.nodes[CANVAS_COHERENCE_NOISE]) {
firstBatchDatumList.push({
@ -106,13 +136,17 @@ export const prepareLinearUIBatch = (
items: seeds,
});
}
if (graph.nodes[METADATA_ACCUMULATOR]) {
secondBatchDatumList.push({
node_path: METADATA_ACCUMULATOR,
field_name: 'seed',
items: seeds,
});
}
// add to metadata
removeMetadataFromMainMetadataNode(graph, 'seed');
itemNodesIds.push(BATCH_SEED);
graph.nodes[BATCH_SEED] = seedMetadataItemNode;
secondBatchDatumList.push({
node_path: BATCH_SEED,
field_name: 'value',
items: seeds,
});
if (graph.nodes[CANVAS_COHERENCE_NOISE]) {
secondBatchDatumList.push({
node_path: CANVAS_COHERENCE_NOISE,
@ -137,13 +171,15 @@ export const prepareLinearUIBatch = (
});
}
if (graph.nodes[METADATA_ACCUMULATOR]) {
firstBatchDatumList.push({
node_path: METADATA_ACCUMULATOR,
field_name: 'positive_prompt',
items: extendedPrompts,
});
}
// add to metadata
removeMetadataFromMainMetadataNode(graph, 'positive_prompt');
itemNodesIds.push(BATCH_PROMPT);
graph.nodes[BATCH_PROMPT] = promptMetadataItemNode;
firstBatchDatumList.push({
node_path: BATCH_PROMPT,
field_name: 'value',
items: extendedPrompts,
});
if (shouldConcatSDXLStylePrompt && model?.base_model === 'sdxl') {
unset(graph.nodes[METADATA_ACCUMULATOR], 'positive_style_prompt');
@ -160,18 +196,22 @@ export const prepareLinearUIBatch = (
});
}
if (graph.nodes[METADATA_ACCUMULATOR]) {
firstBatchDatumList.push({
node_path: METADATA_ACCUMULATOR,
field_name: 'positive_style_prompt',
items: stylePrompts,
});
}
// add to metadata
removeMetadataFromMainMetadataNode(graph, 'positive_style_prompt');
itemNodesIds.push(BATCH_STYLE_PROMPT);
graph.nodes[BATCH_STYLE_PROMPT] = stylePromptMetadataItemNode;
firstBatchDatumList.push({
node_path: BATCH_STYLE_PROMPT,
field_name: 'value',
items: stylePrompts,
});
}
data.push(firstBatchDatumList);
}
addBatchMetadataNodeToGraph(graph, itemNodesIds);
const enqueueBatchArg: BatchConfig = {
prepend,
batch: {

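Each dynamically batched field now gets its own metadata_item node whose value field is zipped alongside the real parameter, replacing the old writes into the metadata accumulator. A rough sketch of the resulting batch datum shape (the node id follows the BATCH_SEED constant added in constants.ts; the BatchDatum type here is an illustrative stand-in for the generated Batch['data'] element):

// Sketch: the seed datum now targets the metadata_item node's `value` field
// instead of the removed metadata accumulator's `seed` field.
const BATCH_SEED = 'batch_seed'; // matches constants.ts below

type BatchDatum = { node_path: string; field_name: string; items: unknown[] };

const seeds = [111, 222, 333];

const seedDatum: BatchDatum = {
  node_path: BATCH_SEED, // the metadata_item node added to the graph
  field_name: 'value',   // zipped per run, alongside the NOISE node's seed
  items: seeds,
};
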
View File

@ -20,13 +20,13 @@ import {
IMAGE_TO_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RESIZE,
SEAMLESS,
} from './constants';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Image to Image tab graph.
@ -310,10 +310,7 @@ export const buildLinearImageToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'img2img',
cfg_scale,
height,
@ -325,23 +322,9 @@ export const buildLinearImageToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
strength,
init_image: initialImage.imageName,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -17,7 +17,6 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import {
IMAGE_TO_LATENTS,
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -29,6 +28,7 @@ import {
SEAMLESS,
} from './constants';
import { buildSDXLStylePrompts } from './helpers/craftSDXLStylePrompt';
import { addMainMetadataNodeToGraph } from './metadata';
/**
* Builds the Image to Image tab graph.
@ -330,10 +330,7 @@ export const buildLinearSDXLImageToImageGraph = (
});
}
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'sdxl_img2img',
cfg_scale,
height,
@ -345,24 +342,10 @@ export const buildLinearSDXLImageToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined,
controlnets: [],
loras: [],
strength: strength,
strength,
init_image: initialImage.imageName,
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -10,9 +10,9 @@ import { addSaveImageNode } from './addSaveImageNode';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import { addMainMetadataNodeToGraph } from './metadata';
import {
LATENTS_TO_IMAGE,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -224,10 +224,7 @@ export const buildLinearSDXLTextToImageGraph = (
],
};
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'sdxl_txt2img',
cfg_scale,
height,
@ -239,22 +236,8 @@ export const buildLinearSDXLTextToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined,
controlnets: [],
loras: [],
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -13,12 +13,12 @@ import { addSaveImageNode } from './addSaveImageNode';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import { addMainMetadataNodeToGraph } from './metadata';
import {
CLIP_SKIP,
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
ONNX_MODEL_LOADER,
@ -232,10 +232,7 @@ export const buildLinearTextToImageGraph = (
],
};
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
addMainMetadataNodeToGraph(graph, {
generation_mode: 'txt2img',
cfg_scale,
height,
@ -247,21 +244,7 @@ export const buildLinearTextToImageGraph = (
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // option; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
};
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// Add Seamless To Graph

View File

@ -50,7 +50,15 @@ export const IP_ADAPTER = 'ip_adapter';
export const DYNAMIC_PROMPT = 'dynamic_prompt';
export const IMAGE_COLLECTION = 'image_collection';
export const IMAGE_COLLECTION_ITERATE = 'image_collection_iterate';
export const METADATA = 'metadata';
export const BATCH_METADATA = 'batch_metadata';
export const BATCH_METADATA_COLLECT = 'batch_metadata_collect';
export const BATCH_SEED = 'batch_seed';
export const BATCH_PROMPT = 'batch_prompt';
export const BATCH_STYLE_PROMPT = 'batch_style_prompt';
export const METADATA_COLLECT = 'metadata_collect';
export const METADATA_ACCUMULATOR = 'metadata_accumulator';
export const MERGE_METADATA = 'merge_metadata';
export const REALESRGAN = 'esrgan';
export const DIVIDE = 'divide';
export const SCALE = 'scale_image';

View File

@ -0,0 +1,151 @@
import { NonNullableGraph } from 'features/nodes/types/types';
import { map } from 'lodash-es';
import { MetadataInvocationAsCollection } from 'services/api/types';
import { JsonObject } from 'type-fest';
import {
BATCH_METADATA,
BATCH_METADATA_COLLECT,
MERGE_METADATA,
METADATA,
METADATA_COLLECT,
SAVE_IMAGE,
} from './constants';
export const addMainMetadataNodeToGraph = (
graph: NonNullableGraph,
metadata: JsonObject
): void => {
graph.nodes[METADATA] = {
id: METADATA,
type: 'metadata',
items: map(metadata, (value, label) => ({ label, value })),
};
graph.nodes[METADATA_COLLECT] = {
id: METADATA_COLLECT,
type: 'collect',
};
graph.nodes[MERGE_METADATA] = {
id: MERGE_METADATA,
type: 'merge_metadata_dict',
};
graph.edges.push({
source: {
node_id: METADATA,
field: 'metadata_dict',
},
destination: {
node_id: METADATA_COLLECT,
field: 'item',
},
});
graph.edges.push({
source: {
node_id: METADATA_COLLECT,
field: 'collection',
},
destination: {
node_id: MERGE_METADATA,
field: 'collection',
},
});
graph.edges.push({
source: {
node_id: MERGE_METADATA,
field: 'metadata_dict',
},
destination: {
node_id: SAVE_IMAGE,
field: 'metadata',
},
});
return;
};
export const addMainMetadata = (
graph: NonNullableGraph,
metadata: JsonObject
): void => {
const metadataNode = graph.nodes[METADATA] as
| MetadataInvocationAsCollection
| undefined;
if (!metadataNode) {
return;
}
metadataNode.items.push(
...map(metadata, (value, label) => ({ label, value }))
);
};
export const removeMetadataFromMainMetadataNode = (
graph: NonNullableGraph,
label: string
): void => {
const metadataNode = graph.nodes[METADATA] as
| MetadataInvocationAsCollection
| undefined;
if (!metadataNode) {
return;
}
metadataNode.items = metadataNode.items.filter(
(item) => item.label !== label
);
};
export const addBatchMetadataNodeToGraph = (
graph: NonNullableGraph,
itemNodeIds: string[]
) => {
graph.nodes[BATCH_METADATA] = {
id: BATCH_METADATA,
type: 'metadata',
};
graph.nodes[BATCH_METADATA_COLLECT] = {
id: BATCH_METADATA_COLLECT,
type: 'collect',
};
itemNodeIds.forEach((id) => {
graph.edges.push({
source: {
node_id: id,
field: 'item',
},
destination: {
node_id: BATCH_METADATA_COLLECT,
field: 'item',
},
});
});
graph.edges.push({
source: {
node_id: BATCH_METADATA_COLLECT,
field: 'collection',
},
destination: {
node_id: BATCH_METADATA,
field: 'items',
},
});
graph.edges.push({
source: {
node_id: BATCH_METADATA,
field: 'metadata_dict',
},
destination: {
node_id: METADATA_COLLECT,
field: 'item',
},
});
};
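
Taken together, the helpers in this new file give every linear and canvas graph the same metadata plumbing. Restated as plain data (node ids abbreviated to their constant values, fields are the source fields of each edge), the edges built above form this topology:

// Main metadata pipeline wired by addMainMetadataNodeToGraph:
//   metadata -> metadata_collect -> merge_metadata -> save_image.metadata
// Batch items join in via addBatchMetadataNodeToGraph:
//   batch_* item nodes -> batch_metadata_collect -> batch_metadata -> metadata_collect
const metadataTopology = [
  { from: 'metadata', field: 'metadata_dict', to: 'metadata_collect' },
  { from: 'metadata_collect', field: 'collection', to: 'merge_metadata' },
  { from: 'merge_metadata', field: 'metadata_dict', to: 'save_image' },
  { from: 'batch_metadata_collect', field: 'collection', to: 'batch_metadata' },
  { from: 'batch_metadata', field: 'metadata_dict', to: 'metadata_collect' },
];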

View File

@ -19,10 +19,7 @@ const RESERVED_INPUT_FIELD_NAMES = ['id', 'type', 'use_cache'];
const RESERVED_OUTPUT_FIELD_NAMES = ['type'];
const RESERVED_FIELD_TYPES = ['IsIntermediate', 'WorkflowField'];
const invocationDenylist: AnyInvocationType[] = [
'graph',
'metadata_accumulator',
];
const invocationDenylist: AnyInvocationType[] = ['graph'];
const isReservedInputField = (nodeType: string, fieldName: string) => {
if (RESERVED_INPUT_FIELD_NAMES.includes(fieldName)) {

File diff suppressed because one or more lines are too long

View File

@ -147,6 +147,15 @@ export type ImageNSFWBlurInvocation = s['ImageNSFWBlurInvocation'];
export type ImageWatermarkInvocation = s['ImageWatermarkInvocation'];
export type SeamlessModeInvocation = s['SeamlessModeInvocation'];
export type SaveImageInvocation = s['SaveImageInvocation'];
export type MetadataInvocation = s['MetadataInvocation'];
export type MetadataInvocationAsCollection = Omit<
s['MetadataInvocation'],
'items'
> & {
items: s['MetadataItem'][];
};
export type MetadataItemInvocation = s['MetadataItemInvocation'];
export type MergeMetadataDictInvocation = s['MergeMetadataDictInvocation'];
// ControlNet Nodes
export type ControlNetInvocation = s['ControlNetInvocation'];
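
A short sketch of why MetadataInvocationAsCollection narrows items: in the node definition (and therefore the generated schema) items is polymorphic, accepting a single MetadataItem or a collection, so the graph builders use an array-only view to push into it safely. The GeneratedMetadataInvocation type below is an illustrative stand-in for s['MetadataInvocation']:

type MetadataItem = { label: string; value: unknown };

// Illustrative stand-in for the generated s['MetadataInvocation'] type.
type GeneratedMetadataInvocation = {
  id: string;
  type: 'metadata';
  items?: MetadataItem | MetadataItem[];
};

// The app-side view guarantees a plain array so graph builders can push into it.
type MetadataInvocationAsCollection = Omit<GeneratedMetadataInvocation, 'items'> & {
  items: MetadataItem[];
};

const node: MetadataInvocationAsCollection = { id: 'metadata', type: 'metadata', items: [] };
node.items.push({ label: 'seed', value: 123 });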