tidy(ui): remove old canvas graphs

psychedelicious 2024-06-28 18:11:26 +10:00
parent 89de04775e
commit c5172d4c5a
20 changed files with 0 additions and 5153 deletions

View File: addControlNetToLinearGraph.ts

@@ -1,150 +0,0 @@
import type { RootState } from 'app/store/store';
import { selectValidControlNets } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlAdapterProcessorType, ControlNetConfig } from 'features/controlAdapters/store/types';
import type { ImageField } from 'features/nodes/types/common';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import { CONTROL_NET_COLLECT } from 'features/nodes/util/graph/constants';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import type { Invocation, NonNullableGraph, S } from 'services/api/types';
import { assert } from 'tsafe';
export const addControlNetToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
const controlNetMetadata: S['CoreMetadataInvocation']['controlnets'] = [];
const controlNets = selectValidControlNets(state.controlAdapters).filter(
({ model, processedControlImage, processorType, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.canvasV2.params.model?.base;
const hasControlImage = (processedControlImage && processorType !== 'none') || controlImage;
return isEnabled && hasModel && doesBaseMatch && hasControlImage;
}
);
// The generation tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);
assert(activeTabName !== 'generation', 'Tried to use addControlNetToLinearGraph on generation tab');
if (controlNets.length) {
// Even though denoise_latents' control input is SINGLE_OR_COLLECTION, keep it simple and always use a collect
const controlNetCollectNode: Invocation<'collect'> = {
id: CONTROL_NET_COLLECT,
type: 'collect',
is_intermediate: true,
};
graph.nodes[CONTROL_NET_COLLECT] = controlNetCollectNode;
graph.edges.push({
source: { node_id: CONTROL_NET_COLLECT, field: 'collection' },
destination: {
node_id: baseNodeId,
field: 'control',
},
});
for (const controlNet of controlNets) {
if (!controlNet.model) {
// should be unreachable - models were filtered above; skip this adapter instead of aborting
continue;
}
const {
id,
controlImage,
processedControlImage,
beginStepPct,
endStepPct,
controlMode,
resizeMode,
model,
processorType,
weight,
} = controlNet;
const controlNetNode: Invocation<'controlnet'> = {
id: `control_net_${id}`,
type: 'controlnet',
is_intermediate: true,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
control_mode: controlMode,
resize_mode: resizeMode,
control_model: model,
control_weight: weight,
image: buildControlImage(controlImage, processedControlImage, processorType),
};
graph.nodes[controlNetNode.id] = controlNetNode;
controlNetMetadata.push(buildControlNetMetadata(controlNet));
graph.edges.push({
source: { node_id: controlNetNode.id, field: 'control' },
destination: {
node_id: CONTROL_NET_COLLECT,
field: 'item',
},
});
}
upsertMetadata(graph, { controlnets: controlNetMetadata });
}
};
const buildControlImage = (
controlImage: string | null,
processedControlImage: string | null,
processorType: ControlAdapterProcessorType
): ImageField => {
let image: ImageField | null = null;
if (processedControlImage && processorType !== 'none') {
// We've already processed the image in the app, so we can just use the processed image
image = {
image_name: processedControlImage,
};
} else if (controlImage) {
// No processed image is available; use the raw control image as-is
image = {
image_name: controlImage,
};
}
assert(image, 'ControlNet image is required');
return image;
};
const buildControlNetMetadata = (controlNet: ControlNetConfig): S['ControlNetMetadataField'] => {
const {
controlImage,
processedControlImage,
beginStepPct,
endStepPct,
controlMode,
resizeMode,
model,
processorType,
weight,
} = controlNet;
assert(model, 'ControlNet model is required');
const processed_image =
processedControlImage && processorType !== 'none'
? {
image_name: processedControlImage,
}
: null;
assert(controlImage, 'ControlNet image is required');
return {
control_model: model,
control_weight: weight,
control_mode: controlMode,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
resize_mode: resizeMode,
image: {
image_name: controlImage,
},
processed_image,
};
};
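
Every adapter helper removed in this commit uses the same fan-in shape seen above: rather than plugging a single adapter straight into denoise_latents' SINGLE_OR_COLLECTION input, each adapter node feeds one shared collect node, whose collection output goes to the denoiser. A minimal, self-contained sketch of that shape (the local Edge/Graph types and wireCollect helper are illustrative stand-ins, not the app's real NonNullableGraph API):

type Edge = { source: { node_id: string; field: string }; destination: { node_id: string; field: string } };
type Graph = { nodes: Record<string, { id: string; type: string }>; edges: Edge[] };

// Fan N adapter nodes into one shared 'collect' node, then into the denoiser's 'control' input.
const wireCollect = (graph: Graph, adapterIds: string[], collectId: string, denoiseId: string): void => {
  graph.nodes[collectId] = { id: collectId, type: 'collect' };
  graph.edges.push({ source: { node_id: collectId, field: 'collection' }, destination: { node_id: denoiseId, field: 'control' } });
  for (const id of adapterIds) {
    graph.edges.push({ source: { node_id: id, field: 'control' }, destination: { node_id: collectId, field: 'item' } });
  }
};

The same wiring recurs below for IP adapters (field 'ip_adapter') and T2I adapters (field 't2i_adapter'); only the field names change.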

View File: addIPAdapterToLinearGraph.ts

@@ -1,109 +0,0 @@
import type { RootState } from 'app/store/store';
import { selectValidIPAdapters } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { IPAdapterConfig } from 'features/controlAdapters/store/types';
import type { ImageField } from 'features/nodes/types/common';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import { IP_ADAPTER_COLLECT } from 'features/nodes/util/graph/constants';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import type { Invocation, NonNullableGraph, S } from 'services/api/types';
import { assert } from 'tsafe';
export const addIPAdapterToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
// The generation tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);
assert(activeTabName !== 'generation', 'Tried to use addIPAdapterToLinearGraph on generation tab');
const ipAdapters = selectValidIPAdapters(state.controlAdapters).filter(({ model, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.canvasV2.params.model?.base;
const hasControlImage = controlImage;
return isEnabled && hasModel && doesBaseMatch && hasControlImage;
});
if (ipAdapters.length) {
// Even though denoise_latents' ip adapter input is SINGLE_OR_COLLECTION, keep it simple and always use a collect
const ipAdapterCollectNode: Invocation<'collect'> = {
id: IP_ADAPTER_COLLECT,
type: 'collect',
is_intermediate: true,
};
graph.nodes[IP_ADAPTER_COLLECT] = ipAdapterCollectNode;
graph.edges.push({
source: { node_id: IP_ADAPTER_COLLECT, field: 'collection' },
destination: {
node_id: baseNodeId,
field: 'ip_adapter',
},
});
const ipAdapterMetadata: S['CoreMetadataInvocation']['ipAdapters'] = [];
for (const ipAdapter of ipAdapters) {
if (!ipAdapter.model) {
// should be unreachable - models were filtered above; skip this adapter instead of aborting
continue;
}
const { id, weight, model, clipVisionModel, method, beginStepPct, endStepPct, controlImage } = ipAdapter;
assert(controlImage, 'IP Adapter image is required');
const ipAdapterNode: Invocation<'ip_adapter'> = {
id: `ip_adapter_${id}`,
type: 'ip_adapter',
is_intermediate: true,
weight: weight,
method: method,
ip_adapter_model: model,
clip_vision_model: clipVisionModel,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
image: {
image_name: controlImage,
},
};
graph.nodes[ipAdapterNode.id] = ipAdapterNode;
ipAdapterMetadata.push(buildIPAdapterMetadata(ipAdapter));
graph.edges.push({
source: { node_id: ipAdapterNode.id, field: 'ip_adapter' },
destination: {
node_id: ipAdapterCollectNode.id,
field: 'item',
},
});
}
upsertMetadata(graph, { ipAdapters: ipAdapterMetadata });
}
};
const buildIPAdapterMetadata = (ipAdapter: IPAdapterConfig): S['IPAdapterMetadataField'] => {
const { controlImage, beginStepPct, endStepPct, model, clipVisionModel, method, weight } = ipAdapter;
assert(model, 'IP Adapter model is required');
let image: ImageField | null = null;
if (controlImage) {
image = {
image_name: controlImage,
};
}
assert(image, 'IP Adapter image is required');
return {
ip_adapter_model: model,
clip_vision_model: clipVisionModel,
weight,
method,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
image,
};
};

View File: addLoRAsToGraph.ts

@@ -1,158 +0,0 @@
import type { RootState } from 'app/store/store';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import {
CLIP_SKIP,
LORA_LOADER,
MAIN_MODEL_LOADER,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
} from 'features/nodes/util/graph/constants';
import { filter, size } from 'lodash-es';
import type { Invocation, NonNullableGraph, S } from 'services/api/types';
export const addLoRAsToGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string,
modelLoaderNodeId: string = MAIN_MODEL_LOADER
): Promise<void> => {
/**
* LoRA nodes get the UNet and CLIP models from the main model loader and apply the LoRA to them.
* They then pass the modified UNet and CLIP references on to either the next LoRA in the chain,
* or to the inference/conditioning nodes (a generic sketch of this chaining follows the file).
*
* So we need to inject a LoRA chain into the graph.
*/
// TODO(MM2): check base model
const enabledLoRAs = filter(state.lora.loras, (l) => l.isEnabled ?? false);
const loraCount = size(enabledLoRAs);
if (loraCount === 0) {
return;
}
// Remove modelLoaderNodeId unet connection to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) => !(e.source.node_id === modelLoaderNodeId && ['unet'].includes(e.source.field))
);
// Remove CLIP_SKIP connections to conditionings to feed it through LoRAs
graph.edges = graph.edges.filter((e) => !(e.source.node_id === CLIP_SKIP && ['clip'].includes(e.source.field)));
// we need to remember the last lora so we can chain from it
let lastLoraNodeId = '';
let currentLoraIndex = 0;
const loraMetadata: S['CoreMetadataInvocation']['loras'] = [];
enabledLoRAs.forEach((lora) => {
const { weight } = lora;
const { key } = lora.model;
const currentLoraNodeId = `${LORA_LOADER}_${key}`;
const parsedModel = zModelIdentifierField.parse(lora.model);
const loraLoaderNode: Invocation<'lora_loader'> = {
type: 'lora_loader',
id: currentLoraNodeId,
is_intermediate: true,
lora: parsedModel,
weight,
};
loraMetadata.push({
model: parsedModel,
weight,
});
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
if (currentLoraIndex === 0) {
// first lora = start the lora chain, attach directly to model loader
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
} else {
// we are in the middle of the lora chain, instead connect to the previous lora
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
}
if (currentLoraIndex === loraCount - 1) {
// final lora, end the lora chain - we need to connect up to inference and conditioning nodes
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'unet',
},
destination: {
node_id: baseNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
});
}
// remember this lora and advance the index so the next iteration chains from it
lastLoraNodeId = currentLoraNodeId;
currentLoraIndex += 1;
});
upsertMetadata(graph, { loras: loraMetadata });
};
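
The chaining the doc comment describes is effectively a fold over the enabled LoRAs: the UNet/CLIP references enter at the model loader, thread through each lora_loader in turn, and exit at the denoise and conditioning nodes. A generic sketch of that rewiring, assuming the same edge shape as the graphs above (chainField is a hypothetical helper, not part of the removed code):

type Edge = { source: { node_id: string; field: string }; destination: { node_id: string; field: string } };

// Thread one field (e.g. 'unet' or 'clip') from `from`, through each LoRA node id in order, to `to`.
const chainField = (field: string, from: string, loraIds: string[], to: string): Edge[] => {
  const hops = [from, ...loraIds, to];
  const edges: Edge[] = [];
  for (let i = 0; i < hops.length - 1; i++) {
    edges.push({ source: { node_id: hops[i], field }, destination: { node_id: hops[i + 1], field } });
  }
  return edges;
};

// chainField('unet', 'main_model_loader', ['lora_a', 'lora_b'], 'denoise_latents') yields
// loader → lora_a → lora_b → denoise, collapsing the first/middle/last branches above into one pass.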

View File: addNSFWCheckerToGraph.ts

@@ -1,39 +0,0 @@
import type { RootState } from 'app/store/store';
import { LATENTS_TO_IMAGE, NSFW_CHECKER } from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation, NonNullableGraph } from 'services/api/types';
export const addNSFWCheckerToGraph = (
state: RootState,
graph: NonNullableGraph,
nodeIdToAddTo = LATENTS_TO_IMAGE
): void => {
const nodeToAddTo = graph.nodes[nodeIdToAddTo] as Invocation<'l2i'> | undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
}
nodeToAddTo.is_intermediate = true;
nodeToAddTo.use_cache = true;
const nsfwCheckerNode: Invocation<'img_nsfw'> = {
id: NSFW_CHECKER,
type: 'img_nsfw',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
};
graph.nodes[NSFW_CHECKER] = nsfwCheckerNode;
graph.edges.push({
source: {
node_id: nodeIdToAddTo,
field: 'image',
},
destination: {
node_id: NSFW_CHECKER,
field: 'image',
},
});
};

View File: addSDXLLoRAsToGraph.ts

@@ -1,208 +0,0 @@
import type { RootState } from 'app/store/store';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import {
LORA_LOADER,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
SDXL_MODEL_LOADER,
SDXL_REFINER_INPAINT_CREATE_MASK,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { filter, size } from 'lodash-es';
import type { Invocation, NonNullableGraph, S } from 'services/api/types';
export const addSDXLLoRAsToGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string,
modelLoaderNodeId: string = SDXL_MODEL_LOADER
): Promise<void> => {
/**
* LoRA nodes get the UNet and CLIP models from the main model loader and apply the LoRA to them.
* They then pass the modified UNet and CLIP references on to either the next LoRA in the chain,
* or to the inference/conditioning nodes.
*
* So we need to inject a LoRA chain into the graph.
*/
// TODO(MM2): check base model
const enabledLoRAs = filter(state.lora.loras, (l) => l.isEnabled ?? false);
const loraCount = size(enabledLoRAs);
if (loraCount === 0) {
return;
}
const loraMetadata: S['CoreMetadataInvocation']['loras'] = [];
// Handle seamless: the seamless node only carries unet/vae, so when it stands in for the model loader, CLIP must still come from the SDXL model loader
const unetLoaderId = modelLoaderNodeId;
let clipLoaderId = modelLoaderNodeId;
if ([SEAMLESS, SDXL_REFINER_INPAINT_CREATE_MASK].includes(modelLoaderNodeId)) {
clipLoaderId = SDXL_MODEL_LOADER;
}
// Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(e.source.node_id === unetLoaderId && ['unet'].includes(e.source.field)) &&
!(e.source.node_id === clipLoaderId && ['clip'].includes(e.source.field)) &&
!(e.source.node_id === clipLoaderId && ['clip2'].includes(e.source.field))
);
// we need to remember the last lora so we can chain from it
let lastLoraNodeId = '';
let currentLoraIndex = 0;
enabledLoRAs.forEach((lora) => {
const { weight } = lora;
const currentLoraNodeId = `${LORA_LOADER}_${lora.model.key}`;
const parsedModel = zModelIdentifierField.parse(lora.model);
const loraLoaderNode: Invocation<'sdxl_lora_loader'> = {
type: 'sdxl_lora_loader',
id: currentLoraNodeId,
is_intermediate: true,
lora: parsedModel,
weight,
};
loraMetadata.push({ model: parsedModel, weight });
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
if (currentLoraIndex === 0) {
// first lora = start the lora chain, attach directly to model loader
graph.edges.push({
source: {
node_id: unetLoaderId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: clipLoaderId,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: clipLoaderId,
field: 'clip2',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip2',
},
});
} else {
// we are in the middle of the lora chain, instead connect to the previous lora
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'clip2',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip2',
},
});
}
if (currentLoraIndex === loraCount - 1) {
// final lora, end the lora chain - we need to connect up to inference and conditioning nodes
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'unet',
},
destination: {
node_id: baseNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
});
}
// remember this lora and advance the index so the next iteration chains from it
lastLoraNodeId = currentLoraNodeId;
currentLoraIndex += 1;
});
upsertMetadata(graph, { loras: loraMetadata });
};
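
Structurally this is the SD1.5 helper with one more field: SDXL LoRA loaders carry two text encoders, so every hop threads unet, clip, and clip2. In terms of the hypothetical chainField sketch after addLoRAsToGraph, the wiring reduces to the fragment below (the fan-out of the final clip/clip2 to NEGATIVE_CONDITIONING is omitted for brevity; loraIds is a hypothetical list of the chained node ids):

// One chain per field; clip2 is SDXL's second text encoder.
const edges = [
  ...chainField('unet', unetLoaderId, loraIds, baseNodeId),
  ...chainField('clip', clipLoaderId, loraIds, POSITIVE_CONDITIONING),
  ...chainField('clip2', clipLoaderId, loraIds, POSITIVE_CONDITIONING),
];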

View File: addSDXLRefinerToGraph.ts

@@ -1,239 +0,0 @@
import type { RootState } from 'app/store/store';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { getModelMetadataField, upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
INPAINT_CREATE_MASK,
LATENTS_TO_IMAGE,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_MODEL_LOADER,
SDXL_REFINER_DENOISE_LATENTS,
SDXL_REFINER_MODEL_LOADER,
SDXL_REFINER_NEGATIVE_CONDITIONING,
SDXL_REFINER_POSITIVE_CONDITIONING,
SDXL_REFINER_SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { NonNullableGraph } from 'services/api/types';
import { isRefinerMainModelModelConfig } from 'services/api/types';
export const addSDXLRefinerToGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string,
modelLoaderNodeId?: string
): Promise<void> => {
const {
refinerModel,
refinerPositiveAestheticScore,
refinerNegativeAestheticScore,
refinerSteps,
refinerScheduler,
refinerCFGScale,
refinerStart,
} = state.canvasV2.params;
if (!refinerModel) {
return;
}
const { seamlessXAxis, seamlessYAxis } = state.canvasV2.params;
const { boundingBoxScaleMethod } = state.canvas;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
const modelConfig = await fetchModelConfigWithTypeGuard(refinerModel.key, isRefinerMainModelModelConfig);
upsertMetadata(graph, {
refiner_model: getModelMetadataField(modelConfig),
refiner_positive_aesthetic_score: refinerPositiveAestheticScore,
refiner_negative_aesthetic_score: refinerNegativeAestheticScore,
refiner_cfg_scale: refinerCFGScale,
refiner_scheduler: refinerScheduler,
refiner_start: refinerStart,
refiner_steps: refinerSteps,
});
const modelLoaderId = modelLoaderNodeId ? modelLoaderNodeId : SDXL_MODEL_LOADER;
// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
// Unplug the base denoise latents output from Latents To Image; the refiner will produce the final latents
graph.edges = graph.edges.filter((e) => !(e.source.node_id === baseNodeId && ['latents'].includes(e.source.field)));
graph.edges = graph.edges.filter((e) => !(e.source.node_id === modelLoaderId && ['vae'].includes(e.source.field)));
graph.nodes[SDXL_REFINER_MODEL_LOADER] = {
type: 'sdxl_refiner_model_loader',
id: SDXL_REFINER_MODEL_LOADER,
model: refinerModel,
};
graph.nodes[SDXL_REFINER_POSITIVE_CONDITIONING] = {
type: 'sdxl_refiner_compel_prompt',
id: SDXL_REFINER_POSITIVE_CONDITIONING,
style: positiveStylePrompt,
aesthetic_score: refinerPositiveAestheticScore,
};
graph.nodes[SDXL_REFINER_NEGATIVE_CONDITIONING] = {
type: 'sdxl_refiner_compel_prompt',
id: SDXL_REFINER_NEGATIVE_CONDITIONING,
style: negativeStylePrompt,
aesthetic_score: refinerNegativeAestheticScore,
};
graph.nodes[SDXL_REFINER_DENOISE_LATENTS] = {
type: 'denoise_latents',
id: SDXL_REFINER_DENOISE_LATENTS,
cfg_scale: refinerCFGScale,
steps: refinerSteps,
scheduler: refinerScheduler,
denoising_start: refinerStart,
denoising_end: 1,
};
// Add Seamless To Refiner
if (seamlessXAxis || seamlessYAxis) {
graph.nodes[SDXL_REFINER_SEAMLESS] = {
id: SDXL_REFINER_SEAMLESS,
type: 'seamless',
seamless_x: seamlessXAxis,
seamless_y: seamlessYAxis,
};
graph.edges.push(
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: SDXL_REFINER_SEAMLESS,
field: 'unet',
},
},
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: SDXL_REFINER_SEAMLESS,
field: 'vae',
},
},
{
source: {
node_id: SDXL_REFINER_SEAMLESS,
field: 'unet',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'unet',
},
}
);
} else {
graph.edges.push({
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'unet',
},
});
}
graph.edges.push(
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'clip2',
},
destination: {
node_id: SDXL_REFINER_POSITIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: SDXL_REFINER_MODEL_LOADER,
field: 'clip2',
},
destination: {
node_id: SDXL_REFINER_NEGATIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: SDXL_REFINER_POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: SDXL_REFINER_NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: baseNodeId,
field: 'latents',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'latents',
},
}
);
if (graph.id === SDXL_CANVAS_INPAINT_GRAPH || graph.id === SDXL_CANVAS_OUTPAINT_GRAPH) {
graph.edges.push({
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'denoise_mask',
},
});
}
if (graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH || graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH) {
graph.edges.push({
source: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
field: 'latents',
},
});
} else {
graph.edges.push({
source: {
node_id: SDXL_REFINER_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
});
}
};
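
The refiner handoff works by splitting the denoising schedule at refinerStart: the base model's denoiser stops there, and the refiner's denoise_latents above runs from refinerStart (denoising_start) to 1 (denoising_end). A worked sketch with an illustrative value:

// The two denoisers tile the [0, 1] denoising interval at refinerStart.
const refinerStart = 0.8;
const baseDenoise = { denoising_start: 0, denoising_end: refinerStart }; // base model: 0 → 0.8
const refinerDenoise = { denoising_start: refinerStart, denoising_end: 1 }; // refiner: 0.8 → 1
// Larger refinerStart ⇒ the base model does more of the work before the refiner takes over.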

View File: addSeamlessToLinearGraph.ts

@@ -1,102 +0,0 @@
import type { RootState } from 'app/store/store';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import {
DENOISE_LATENTS,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_CONTROL_LAYERS_GRAPH,
SDXL_DENOISE_LATENTS,
SEAMLESS,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import type { NonNullableGraph } from 'services/api/types';
export const addSeamlessToLinearGraph = (
state: RootState,
graph: NonNullableGraph,
modelLoaderNodeId: string
): void => {
const { seamlessXAxis, seamlessYAxis, vae } = state.canvasV2.params;
const isAutoVae = !vae;
graph.nodes[SEAMLESS] = {
id: SEAMLESS,
type: 'seamless',
seamless_x: seamlessXAxis,
seamless_y: seamlessYAxis,
};
if (!isAutoVae) {
graph.nodes[VAE_LOADER] = {
type: 'vae_loader',
id: VAE_LOADER,
is_intermediate: true,
vae_model: vae,
};
}
if (seamlessXAxis) {
upsertMetadata(graph, {
seamless_x: seamlessXAxis,
});
}
if (seamlessYAxis) {
upsertMetadata(graph, {
seamless_y: seamlessYAxis,
});
}
let denoisingNodeId = DENOISE_LATENTS;
if (
graph.id === SDXL_CONTROL_LAYERS_GRAPH ||
graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_INPAINT_GRAPH ||
graph.id === SDXL_CANVAS_OUTPAINT_GRAPH
) {
denoisingNodeId = SDXL_DENOISE_LATENTS;
}
// Remove the model loader's existing unet/vae connections so they can be re-routed through the seamless node
graph.edges = graph.edges.filter(
(e) =>
!(e.source.node_id === modelLoaderNodeId && ['unet'].includes(e.source.field)) &&
!(e.source.node_id === modelLoaderNodeId && ['vae'].includes(e.source.field))
);
graph.edges.push(
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SEAMLESS,
field: 'unet',
},
},
{
source: {
node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: SEAMLESS,
field: 'vae',
},
},
{
source: {
node_id: SEAMLESS,
field: 'unet',
},
destination: {
node_id: denoisingNodeId,
field: 'unet',
},
}
);
};
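
Splicing the seamless node in follows the same two-step recipe used throughout these helpers: filter out the edges that previously left the source field, then re-route source → spliced node → old destination. The removed code hard-codes the destinations; a generic sketch of the splice (hypothetical helper, same edge shape as above):

type Edge = { source: { node_id: string; field: string }; destination: { node_id: string; field: string } };

// Re-route every `fromId.field` edge through `viaId`, preserving the original destinations.
const spliceNode = (edges: Edge[], fromId: string, field: string, viaId: string): Edge[] => {
  const outgoing = (e: Edge) => e.source.node_id === fromId && e.source.field === field;
  const rerouted = edges.filter(outgoing).map((e) => ({ source: { node_id: viaId, field }, destination: e.destination }));
  return [
    ...edges.filter((e) => !outgoing(e)),
    { source: { node_id: fromId, field }, destination: { node_id: viaId, field } },
    ...rerouted,
  ];
};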

View File: addT2IAdapterToLinearGraph.ts

@@ -1,140 +0,0 @@
import type { RootState } from 'app/store/store';
import { selectValidT2IAdapters } from 'features/controlAdapters/store/controlAdaptersSlice';
import type { ControlAdapterProcessorType, T2IAdapterConfig } from 'features/controlAdapters/store/types';
import type { ImageField } from 'features/nodes/types/common';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import { T2I_ADAPTER_COLLECT } from 'features/nodes/util/graph/constants';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import type { Invocation, NonNullableGraph, S } from 'services/api/types';
import { assert } from 'tsafe';
export const addT2IAdaptersToLinearGraph = async (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): Promise<void> => {
// The generation tab has special handling - its control adapters are set up in the Control Layers graph helper.
const activeTabName = activeTabNameSelector(state);
assert(activeTabName !== 'generation', 'Tried to use addT2IAdaptersToLinearGraph on generation tab');
const t2iAdapters = selectValidT2IAdapters(state.controlAdapters).filter(
({ model, processedControlImage, processorType, controlImage, isEnabled }) => {
const hasModel = Boolean(model);
const doesBaseMatch = model?.base === state.canvasV2.params.model?.base;
const hasControlImage = (processedControlImage && processorType !== 'none') || controlImage;
return isEnabled && hasModel && doesBaseMatch && hasControlImage;
}
);
if (t2iAdapters.length) {
// Even though denoise_latents' t2i adapter input is SINGLE_OR_COLLECTION, keep it simple and always use a collect
const t2iAdapterCollectNode: Invocation<'collect'> = {
id: T2I_ADAPTER_COLLECT,
type: 'collect',
is_intermediate: true,
};
graph.nodes[T2I_ADAPTER_COLLECT] = t2iAdapterCollectNode;
graph.edges.push({
source: { node_id: T2I_ADAPTER_COLLECT, field: 'collection' },
destination: {
node_id: baseNodeId,
field: 't2i_adapter',
},
});
const t2iAdapterMetadata: S['CoreMetadataInvocation']['t2iAdapters'] = [];
for (const t2iAdapter of t2iAdapters) {
if (!t2iAdapter.model) {
// should be unreachable - models were filtered above; skip this adapter instead of aborting
continue;
}
const {
id,
controlImage,
processedControlImage,
beginStepPct,
endStepPct,
resizeMode,
model,
processorType,
weight,
} = t2iAdapter;
const t2iAdapterNode: Invocation<'t2i_adapter'> = {
id: `t2i_adapter_${id}`,
type: 't2i_adapter',
is_intermediate: true,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
resize_mode: resizeMode,
t2i_adapter_model: model,
weight: weight,
image: buildControlImage(controlImage, processedControlImage, processorType),
};
graph.nodes[t2iAdapterNode.id] = t2iAdapterNode;
t2iAdapterMetadata.push(buildT2IAdapterMetadata(t2iAdapter));
graph.edges.push({
source: { node_id: t2iAdapterNode.id, field: 't2i_adapter' },
destination: {
node_id: T2I_ADAPTER_COLLECT,
field: 'item',
},
});
}
upsertMetadata(graph, { t2iAdapters: t2iAdapterMetadata });
}
};
const buildControlImage = (
controlImage: string | null,
processedControlImage: string | null,
processorType: ControlAdapterProcessorType
): ImageField => {
let image: ImageField | null = null;
if (processedControlImage && processorType !== 'none') {
// We've already processed the image in the app, so we can just use the processed image
image = {
image_name: processedControlImage,
};
} else if (controlImage) {
// No processed image is available; use the raw control image as-is
image = {
image_name: controlImage,
};
}
assert(image, 'T2I Adapter image is required');
return image;
};
const buildT2IAdapterMetadata = (t2iAdapter: T2IAdapterConfig): S['T2IAdapterMetadataField'] => {
const { controlImage, processedControlImage, beginStepPct, endStepPct, resizeMode, model, processorType, weight } =
t2iAdapter;
assert(model, 'T2I Adapter model is required');
const processed_image =
processedControlImage && processorType !== 'none'
? {
image_name: processedControlImage,
}
: null;
assert(controlImage, 'T2I Adapter image is required');
return {
t2i_adapter_model: model,
weight,
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
resize_mode: resizeMode,
image: {
image_name: controlImage,
},
processed_image,
};
};

View File: addVAEToGraph.ts

@@ -1,175 +0,0 @@
import type { RootState } from 'app/store/store';
import { upsertMetadata } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_IMAGE_TO_IMAGE_GRAPH,
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPAINT_GRAPH,
CANVAS_OUTPUT,
CANVAS_TEXT_TO_IMAGE_GRAPH,
CONTROL_LAYERS_GRAPH,
IMAGE_TO_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_CONTROL_LAYERS_GRAPH,
SDXL_REFINER_SEAMLESS,
SEAMLESS,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import type { NonNullableGraph } from 'services/api/types';
export const addVAEToGraph = async (
state: RootState,
graph: NonNullableGraph,
modelLoaderNodeId: string = MAIN_MODEL_LOADER
): Promise<void> => {
const { vae, seamlessXAxis, seamlessYAxis } = state.canvasV2.params;
const { boundingBoxScaleMethod } = state.canvas;
const { refinerModel } = state.canvasV2.params;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
const isAutoVae = !vae;
const isSeamlessEnabled = seamlessXAxis || seamlessYAxis;
const isSDXL = Boolean(graph.id?.includes('sdxl'));
const isUsingRefiner = isSDXL && Boolean(refinerModel);
if (!isAutoVae && !isSeamlessEnabled) {
graph.nodes[VAE_LOADER] = {
type: 'vae_loader',
id: VAE_LOADER,
is_intermediate: true,
vae_model: vae,
};
}
if (graph.id === CONTROL_LAYERS_GRAPH || graph.id === SDXL_CONTROL_LAYERS_GRAPH) {
graph.edges.push({
source: {
node_id: isSeamlessEnabled
? isUsingRefiner
? SDXL_REFINER_SEAMLESS
: SEAMLESS
: isAutoVae
? modelLoaderNodeId
: VAE_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
});
}
if (
graph.id === CANVAS_TEXT_TO_IMAGE_GRAPH ||
graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH
) {
graph.edges.push({
source: {
node_id: isSeamlessEnabled
? isUsingRefiner
? SDXL_REFINER_SEAMLESS
: SEAMLESS
: isAutoVae
? modelLoaderNodeId
: VAE_LOADER,
field: 'vae',
},
destination: {
node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
field: 'vae',
},
});
}
if (
(graph.id === CONTROL_LAYERS_GRAPH ||
graph.id === SDXL_CONTROL_LAYERS_GRAPH ||
graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH ||
graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH) &&
Boolean(graph.nodes[IMAGE_TO_LATENTS])
) {
graph.edges.push({
source: {
node_id: isSeamlessEnabled
? isUsingRefiner
? SDXL_REFINER_SEAMLESS
: SEAMLESS
: isAutoVae
? modelLoaderNodeId
: VAE_LOADER,
field: 'vae',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'vae',
},
});
}
if (
graph.id === CANVAS_INPAINT_GRAPH ||
graph.id === CANVAS_OUTPAINT_GRAPH ||
graph.id === SDXL_CANVAS_INPAINT_GRAPH ||
graph.id === SDXL_CANVAS_OUTPAINT_GRAPH
) {
graph.edges.push(
{
source: {
node_id: isSeamlessEnabled
? isUsingRefiner
? SDXL_REFINER_SEAMLESS
: SEAMLESS
: isAutoVae
? modelLoaderNodeId
: VAE_LOADER,
field: 'vae',
},
destination: {
node_id: INPAINT_IMAGE,
field: 'vae',
},
},
{
source: {
node_id: isSeamlessEnabled ? (isUsingRefiner ? SDXL_REFINER_SEAMLESS : SEAMLESS) : isAutoVae ? modelLoaderNodeId : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'vae',
},
},
{
source: {
node_id: isSeamlessEnabled
? isUsingRefiner
? SDXL_REFINER_SEAMLESS
: SEAMLESS
: isAutoVae
? modelLoaderNodeId
: VAE_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
}
);
}
if (vae) {
upsertMetadata(graph, { vae });
}
};
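
Each branch above repeats the same three-way decision for which node supplies the VAE: the seamless node (refiner variant when active), the explicit VAE loader, or the main model loader's built-in VAE. The nested ternaries could be factored into one helper; a refactoring sketch (not the shipped code), using the constants imported above:

// Resolve which node's 'vae' output feeds the consumers, mirroring the nested ternaries above.
const resolveVaeSourceId = (
  isSeamlessEnabled: boolean,
  isUsingRefiner: boolean,
  isAutoVae: boolean,
  modelLoaderNodeId: string
): string => {
  if (isSeamlessEnabled) {
    return isUsingRefiner ? SDXL_REFINER_SEAMLESS : SEAMLESS;
  }
  return isAutoVae ? modelLoaderNodeId : VAE_LOADER;
};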

View File: addWatermarkerToGraph.ts

@@ -1,60 +0,0 @@
import type { RootState } from 'app/store/store';
import { LATENTS_TO_IMAGE, NSFW_CHECKER, WATERMARKER } from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation, NonNullableGraph } from 'services/api/types';
export const addWatermarkerToGraph = (
state: RootState,
graph: NonNullableGraph,
nodeIdToAddTo = LATENTS_TO_IMAGE
): void => {
const nodeToAddTo = graph.nodes[nodeIdToAddTo] as Invocation<'l2i'> | undefined;
const nsfwCheckerNode = graph.nodes[NSFW_CHECKER] as Invocation<'img_nsfw'> | undefined;
if (!nodeToAddTo) {
// something has gone terribly awry
return;
}
const watermarkerNode: Invocation<'img_watermark'> = {
id: WATERMARKER,
type: 'img_watermark',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
};
graph.nodes[WATERMARKER] = watermarkerNode;
// no matter the situation, we want the l2i node to be intermediate
nodeToAddTo.is_intermediate = true;
nodeToAddTo.use_cache = true;
if (nsfwCheckerNode) {
// if we are using the NSFW checker, we need to "disable" its output by marking it intermediate,
// then connect it to the watermark node
nsfwCheckerNode.is_intermediate = true;
graph.edges.push({
source: {
node_id: NSFW_CHECKER,
field: 'image',
},
destination: {
node_id: WATERMARKER,
field: 'image',
},
});
} else {
// otherwise we just connect to the watermark node
graph.edges.push({
source: {
node_id: nodeIdToAddTo,
field: 'image',
},
destination: {
node_id: WATERMARKER,
field: 'image',
},
});
}
};
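
Together with addNSFWCheckerToGraph, this establishes a fixed post-processing order: the decode node is always forced intermediate, and the image flows through whichever safety stages are enabled, with only the last stage writing to the board. A sketch of that chain using the constants above (stages that are disabled would simply be dropped from the list):

// LATENTS_TO_IMAGE → NSFW_CHECKER → WATERMARKER, linked pairwise on the 'image' field.
const stages = [LATENTS_TO_IMAGE, NSFW_CHECKER, WATERMARKER];
for (let i = 0; i < stages.length - 1; i++) {
  graph.edges.push({
    source: { node_id: stages[i], field: 'image' },
    destination: { node_id: stages[i + 1], field: 'image' },
  });
}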

View File: buildCanvasGraph.ts

@@ -1,57 +0,0 @@
import type { RootState } from 'app/store/store';
import type { ImageDTO, NonNullableGraph } from 'services/api/types';
import { buildCanvasImageToImageGraph } from './buildCanvasImageToImageGraph';
import { buildCanvasInpaintGraph } from './buildCanvasInpaintGraph';
import { buildCanvasOutpaintGraph } from './buildCanvasOutpaintGraph';
import { buildCanvasSDXLImageToImageGraph } from './buildCanvasSDXLImageToImageGraph';
import { buildCanvasSDXLInpaintGraph } from './buildCanvasSDXLInpaintGraph';
import { buildCanvasSDXLOutpaintGraph } from './buildCanvasSDXLOutpaintGraph';
import { buildCanvasSDXLTextToImageGraph } from './buildCanvasSDXLTextToImageGraph';
import { buildCanvasTextToImageGraph } from './buildCanvasTextToImageGraph';
export const buildCanvasGraph = async (
state: RootState,
generationMode: 'txt2img' | 'img2img' | 'inpaint' | 'outpaint',
canvasInitImage: ImageDTO | undefined,
canvasMaskImage: ImageDTO | undefined
): Promise<NonNullableGraph> => {
let graph: NonNullableGraph;
if (generationMode === 'txt2img') {
if (state.canvasV2.params.model && state.canvasV2.params.model.base === 'sdxl') {
graph = await buildCanvasSDXLTextToImageGraph(state);
} else {
graph = await buildCanvasTextToImageGraph(state);
}
} else if (generationMode === 'img2img') {
if (!canvasInitImage) {
throw new Error('Missing canvas init image');
}
if (state.canvasV2.params.model && state.canvasV2.params.model.base === 'sdxl') {
graph = await buildCanvasSDXLImageToImageGraph(state, canvasInitImage);
} else {
graph = await buildCanvasImageToImageGraph(state, canvasInitImage);
}
} else if (generationMode === 'inpaint') {
if (!canvasInitImage || !canvasMaskImage) {
throw new Error('Missing canvas init and mask images');
}
if (state.canvasV2.params.model && state.canvasV2.params.model.base === 'sdxl') {
graph = await buildCanvasSDXLInpaintGraph(state, canvasInitImage, canvasMaskImage);
} else {
graph = await buildCanvasInpaintGraph(state, canvasInitImage, canvasMaskImage);
}
} else {
if (!canvasInitImage) {
throw new Error('Missing canvas init image');
}
if (state.canvasV2.params.model && state.canvasV2.params.model.base === 'sdxl') {
graph = await buildCanvasSDXLOutpaintGraph(state, canvasInitImage, canvasMaskImage);
} else {
graph = await buildCanvasOutpaintGraph(state, canvasInitImage, canvasMaskImage);
}
}
return graph;
};
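
A hypothetical call site for the dispatcher, showing which image inputs each mode requires (the DTOs would come from the canvas session; enqueueing the result is elided):

// txt2img needs no images; img2img and outpaint need an init image; inpaint needs init + mask.
const graph = await buildCanvasGraph(state, 'img2img', canvasInitImage, undefined);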

View File: buildCanvasImageToImageGraph.ts

@@ -1,374 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addCoreMetadataNode, getModelMetadataField } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_IMAGE_TO_IMAGE_GRAPH,
CANVAS_OUTPUT,
CLIP_SKIP,
DENOISE_LATENTS,
IMAGE_TO_LATENTS,
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
* Builds the Canvas tab's Image to Image graph.
*/
export const buildCanvasImageToImageGraph = async (
state: RootState,
initialImage: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
img2imgStrength: strength,
vaePrecision,
clipSkip,
shouldUseCpuNoise,
seamlessXAxis,
seamlessYAxis,
} = state.canvasV2.params;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only thing we need extra logic for is handling randomized seed, control net, and for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: CANVAS_IMAGE_TO_IMAGE_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
use_cpu,
seed,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: 1 - strength,
denoising_end: 1,
},
[CANVAS_OUTPUT]: {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
use_cache: false,
},
},
edges: [
// Connect Model Loader to CLIP Skip and UNet
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: CLIP_SKIP,
field: 'clip',
},
},
// Connect CLIP Skip To Conditioning
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
// Connect Everything To Denoise Latents
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: IMAGE_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
},
],
};
// Decode Latents To Image & Handle Scaled Before Processing
if (isUsingScaledDimensions) {
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
};
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
width: width,
height: height,
use_cache: false,
};
graph.edges.push(
{
source: {
node_id: IMG2IMG_RESIZE,
field: 'image',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
},
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
fp32,
use_cache: false,
};
(graph.nodes[IMAGE_TO_LATENTS] as Invocation<'i2l'>).image = initialImage;
graph.edges.push({
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
addCoreMetadataNode(
graph,
{
generation_mode: 'img2img',
cfg_scale,
cfg_rescale_multiplier,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: getModelMetadataField(modelConfig),
seed,
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
clip_skip: clipSkip,
strength,
init_image: initialImage.image_name,
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// add LoRA support
await addLoRAsToGraph(state, graph, DENOISE_LATENTS);
// optionally add custom VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
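
Note how img2img strength enters the graph solely through denoising_start = 1 - strength on the denoise node: strength 1.0 denoises from pure noise, while a low strength runs only the tail of the schedule and stays close to the init image. A worked example with illustrative values:

const strength = 0.75;
const denoising_start = 1 - strength; // 0.25: skip the first quarter of the schedule
const denoising_end = 1; // img2img always denoises to completion
// Higher strength ⇒ earlier start ⇒ more of the init image is re-imagined.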

View File: buildCanvasInpaintGraph.ts

@@ -1,469 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { addCoreMetadataNode } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_INPAINT_GRAPH,
CANVAS_OUTPUT,
CLIP_SKIP,
DENOISE_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
MASK_RESIZE_DOWN,
MASK_RESIZE_UP,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
* Builds the Canvas tab's Inpaint graph.
*/
export const buildCanvasInpaintGraph = async (
state: RootState,
canvasInitImage: ImageDTO,
canvasMaskImage: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
img2imgStrength: strength,
seed,
vaePrecision,
shouldUseCpuNoise,
clipSkip,
seamlessXAxis,
seamlessYAxis,
canvasCoherenceMode,
canvasCoherenceMinDenoise,
canvasCoherenceEdgeSize,
maskBlur,
} = state.canvasV2.params;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const is_intermediate = true;
const fp32 = vaePrecision === 'fp32';
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
const graph: NonNullableGraph = {
id: CANVAS_INPAINT_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
seed,
is_intermediate,
},
[INPAINT_CREATE_MASK]: {
type: 'create_gradient_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
coherence_mode: canvasCoherenceMode,
minimum_denoise: canvasCoherenceMinDenoise,
edge_radius: canvasCoherenceEdgeSize,
tiled: false,
fp32: fp32,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
steps: steps,
cfg_scale: cfg_scale,
cfg_rescale_multiplier,
scheduler: scheduler,
denoising_start: 1 - strength,
denoising_end: 1,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'canvas_paste_back',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
mask_blur: maskBlur,
source_image: canvasInitImage,
},
},
edges: [
// Connect Model Loader to CLIP Skip and UNet
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: CLIP_SKIP,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'unet',
},
},
// Connect CLIP Skip to Conditioning
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
// Connect Everything To Inpaint Node
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: INPAINT_IMAGE,
field: 'latents',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Decode Inpainted Latents To Image
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
// Handle Scale Before Processing
if (isUsingScaledDimensions) {
const scaledWidth: number = scaledBoundingBoxDimensions.width;
const scaledHeight: number = scaledBoundingBoxDimensions.height;
// Add Scaling Nodes
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
};
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasMaskImage,
};
graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_DOWN,
is_intermediate,
width: width,
height: height,
};
graph.nodes[MASK_RESIZE_DOWN] = {
type: 'img_resize',
id: MASK_RESIZE_DOWN,
is_intermediate,
width: width,
height: height,
};
(graph.nodes[NOISE] as Invocation<'noise'>).width = scaledWidth;
(graph.nodes[NOISE] as Invocation<'noise'>).height = scaledHeight;
// Connect Nodes
graph.edges.push(
// Scale Inpaint Image and Mask
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE,
field: 'image',
},
},
{
source: {
node_id: MASK_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Resize Down
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
},
// Paste Back
{
source: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
} else {
// Add Images To Nodes
(graph.nodes[NOISE] as Invocation<'noise'>).width = width;
(graph.nodes[NOISE] as Invocation<'noise'>).height = height;
graph.nodes[INPAINT_IMAGE] = {
...(graph.nodes[INPAINT_IMAGE] as Invocation<'i2l'>),
image: canvasInitImage,
};
graph.nodes[INPAINT_CREATE_MASK] = {
...(graph.nodes[INPAINT_CREATE_MASK] as Invocation<'create_gradient_mask'>),
mask: canvasMaskImage,
};
// Paste Back
graph.nodes[CANVAS_OUTPUT] = {
...(graph.nodes[CANVAS_OUTPUT] as Invocation<'canvas_paste_back'>),
mask: canvasMaskImage,
};
graph.edges.push({
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
});
}
addCoreMetadataNode(
graph,
{
generation_mode: 'inpaint',
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
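
When scaled dimensions are active, the inpaint graph performs a resize round trip: the init image and mask are resized up to the scaled bounding box, denoising happens at that resolution, and the result plus the expanded mask area are resized back down before canvas_paste_back composites them over the original. Summarized as a data flow (node ids as in the graph above):

// canvasInitImage → INPAINT_IMAGE_RESIZE_UP → INPAINT_IMAGE (i2l) → DENOISE_LATENTS
// canvasMaskImage → MASK_RESIZE_UP → INPAINT_CREATE_MASK → DENOISE_LATENTS (denoise_mask)
// DENOISE_LATENTS → LATENTS_TO_IMAGE → INPAINT_IMAGE_RESIZE_DOWN → CANVAS_OUTPUT (target_image)
// INPAINT_CREATE_MASK (expanded_mask_area) → MASK_RESIZE_DOWN → CANVAS_OUTPUT (mask)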

View File: buildCanvasOutpaintGraph.ts

@@ -1,629 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { addCoreMetadataNode } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPAINT_GRAPH,
CANVAS_OUTPUT,
CLIP_SKIP,
DENOISE_LATENTS,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
INPAINT_INFILL,
INPAINT_INFILL_RESIZE_DOWN,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
MASK_COMBINE,
MASK_FROM_ALPHA,
MASK_RESIZE_DOWN,
MASK_RESIZE_UP,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
* Builds the Canvas tab's Outpaint graph.
*/
export const buildCanvasOutpaintGraph = async (
state: RootState,
canvasInitImage: ImageDTO,
canvasMaskImage?: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
img2imgStrength: strength,
seed,
vaePrecision,
shouldUseCpuNoise,
infillTileSize,
infillPatchmatchDownscaleSize,
infillMethod,
// infillMosaicTileWidth,
// infillMosaicTileHeight,
// infillMosaicMinColor,
// infillMosaicMaxColor,
infillColorValue,
clipSkip,
seamlessXAxis,
seamlessYAxis,
canvasCoherenceMode,
canvasCoherenceMinDenoise,
canvasCoherenceEdgeSize,
maskBlur,
} = state.canvasV2.params;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
const graph: NonNullableGraph = {
id: CANVAS_OUTPAINT_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
},
[MASK_FROM_ALPHA]: {
type: 'tomask',
id: MASK_FROM_ALPHA,
is_intermediate,
image: canvasInitImage,
},
[MASK_COMBINE]: {
type: 'mask_combine',
id: MASK_COMBINE,
is_intermediate,
mask2: canvasMaskImage,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
seed,
is_intermediate,
},
[INPAINT_CREATE_MASK]: {
type: 'create_gradient_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
coherence_mode: canvasCoherenceMode,
edge_radius: canvasCoherenceEdgeSize,
minimum_denoise: canvasCoherenceMinDenoise,
tiled: false,
fp32: fp32,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
        steps,
        cfg_scale,
        cfg_rescale_multiplier,
        scheduler,
denoising_start: 1 - strength,
denoising_end: 1,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'canvas_paste_back',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
use_cache: false,
mask_blur: maskBlur,
},
},
edges: [
      // Connect Model Loader To UNet & CLIP Skip
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: CLIP_SKIP,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'unet',
},
},
// Connect CLIP Skip to Conditioning
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
// Connect Infill Result To Inpaint Image
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE,
field: 'image',
},
},
// Combine Mask from Init Image with User Painted Mask
{
source: {
node_id: MASK_FROM_ALPHA,
field: 'image',
},
destination: {
node_id: MASK_COMBINE,
field: 'mask1',
},
},
// Plug Everything Into Inpaint Node
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: INPAINT_IMAGE,
field: 'latents',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: isUsingScaledDimensions ? MASK_RESIZE_UP : MASK_COMBINE,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'denoise_mask',
},
},
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
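  // Editor's note (added comment): everything past this point mutates `graph` in place -
  // first the INPAINT_INFILL node for the chosen infill method, then the optional
  // scale-up/scale-down resize chain, and finally the shared metadata, seamless, VAE,
  // LoRA, adapter, and safety-checker passes.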
// Add Infill Nodes
if (infillMethod === 'patchmatch') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_patchmatch',
id: INPAINT_INFILL,
is_intermediate,
downscale: infillPatchmatchDownscaleSize,
};
}
if (infillMethod === 'lama') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_lama',
id: INPAINT_INFILL,
is_intermediate,
};
}
if (infillMethod === 'cv2') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_cv2',
id: INPAINT_INFILL,
is_intermediate,
};
}
if (infillMethod === 'tile') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_tile',
id: INPAINT_INFILL,
is_intermediate,
tile_size: infillTileSize,
};
}
// TODO: add mosaic back
// if (infillMethod === 'mosaic') {
// graph.nodes[INPAINT_INFILL] = {
// type: 'infill_mosaic',
// id: INPAINT_INFILL,
// is_intermediate,
// tile_width: infillMosaicTileWidth,
// tile_height: infillMosaicTileHeight,
// min_color: infillMosaicMinColor,
// max_color: infillMosaicMaxColor,
// };
// }
if (infillMethod === 'color') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_rgba',
id: INPAINT_INFILL,
color: infillColorValue,
is_intermediate,
};
}
// Handle Scale Before Processing
if (isUsingScaledDimensions) {
const scaledWidth: number = scaledBoundingBoxDimensions.width;
const scaledHeight: number = scaledBoundingBoxDimensions.height;
// Add Scaling Nodes
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
};
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
};
    graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
      type: 'img_resize',
      id: INPAINT_IMAGE_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
    graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = {
      type: 'img_resize',
      id: INPAINT_INFILL_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
    graph.nodes[MASK_RESIZE_DOWN] = {
      type: 'img_resize',
      id: MASK_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
(graph.nodes[NOISE] as Invocation<'noise'>).width = scaledWidth;
(graph.nodes[NOISE] as Invocation<'noise'>).height = scaledHeight;
// Connect Nodes
graph.edges.push(
// Scale Inpaint Image
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_INFILL,
field: 'image',
},
},
// Take combined mask and resize
{
source: {
node_id: MASK_COMBINE,
field: 'image',
},
destination: {
node_id: MASK_RESIZE_UP,
field: 'image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Resize Results Down
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_INFILL_RESIZE_DOWN,
field: 'image',
},
},
// Paste Back
{
source: {
node_id: INPAINT_INFILL_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'source_image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
} else {
// Add Images To Nodes
graph.nodes[INPAINT_INFILL] = {
...(graph.nodes[INPAINT_INFILL] as Invocation<'infill_tile'> | Invocation<'infill_patchmatch'>),
image: canvasInitImage,
};
(graph.nodes[NOISE] as Invocation<'noise'>).width = width;
(graph.nodes[NOISE] as Invocation<'noise'>).height = height;
graph.nodes[INPAINT_IMAGE] = {
...(graph.nodes[INPAINT_IMAGE] as Invocation<'i2l'>),
image: canvasInitImage,
};
graph.edges.push(
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'source_image',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: MASK_COMBINE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
}
addCoreMetadataNode(
graph,
{
generation_mode: 'outpaint',
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
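// Editor's sketch (not part of the original file): every edge in these graphs is a
// { source, destination } pair of node-id/field references. A hypothetical helper like
// `edge` below could express the wiring above more compactly; the types are local
// stand-ins for the app's generated graph types.
type EdgeEndpoint = { node_id: string; field: string };
type GraphEdge = { source: EdgeEndpoint; destination: EdgeEndpoint };

const edge = (fromNode: string, fromField: string, toNode: string, toField: string): GraphEdge => ({
  source: { node_id: fromNode, field: fromField },
  destination: { node_id: toNode, field: toField },
});

// e.g. graph.edges.push(edge(INPAINT_INFILL, 'image', INPAINT_IMAGE, 'image'));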

View File

@ -1,382 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addCoreMetadataNode, getModelMetadataField } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
IMAGE_TO_LATENTS,
IMG2IMG_RESIZE,
LATENTS_TO_IMAGE,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
 * Builds the Canvas tab's SDXL Image to Image graph.
 */
export const buildCanvasSDXLImageToImageGraph = async (
state: RootState,
initialImage: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
vaePrecision,
shouldUseCpuNoise,
seamlessXAxis,
seamlessYAxis,
img2imgStrength: strength,
} = state.canvasV2.params;
const { refinerModel, refinerStart } = state.canvasV2.params;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// Model Loader ID
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
// Construct Style Prompt
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only thing we need extra logic for is handling randomized seed, control net, and for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: modelLoaderNodeId,
model,
},
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: negativeStylePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
use_cpu,
seed,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
is_intermediate,
fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
denoising_end: refinerModel ? refinerStart : 1,
},
},
edges: [
// Connect Model Loader To UNet & CLIP
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
},
// Connect Everything to Denoise Latents
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: IMAGE_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
},
],
};
  // Decode Latents To Image & Handle Scale Before Processing
if (isUsingScaledDimensions) {
graph.nodes[IMG2IMG_RESIZE] = {
id: IMG2IMG_RESIZE,
type: 'img_resize',
is_intermediate,
image: initialImage,
width: scaledBoundingBoxDimensions.width,
height: scaledBoundingBoxDimensions.height,
};
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
      width,
      height,
use_cache: false,
};
graph.edges.push(
{
source: {
node_id: IMG2IMG_RESIZE,
field: 'image',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
},
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate,
fp32,
use_cache: false,
};
(graph.nodes[IMAGE_TO_LATENTS] as Invocation<'i2l'>).image = initialImage;
graph.edges.push({
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
addCoreMetadataNode(
graph,
{
generation_mode: 'img2img',
cfg_scale,
cfg_rescale_multiplier,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: getModelMetadataField(modelConfig),
seed,
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
strength,
init_image: initialImage.image_name,
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
if (seamlessXAxis || seamlessYAxis) {
modelLoaderNodeId = SDXL_REFINER_SEAMLESS;
}
}
// optionally add custom VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
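// Editor's sketch (illustrative, not from the original commit): the refiner handoff used
// above reduces to a pure function over the [0, 1] denoising timeline - the base model
// denoises [start, end) and the refiner, when enabled, takes over from `refinerStart`.
const denoisingWindow = (strength: number, refinerStart?: number): { start: number; end: number } => ({
  start: refinerStart !== undefined ? Math.min(refinerStart, 1 - strength) : 1 - strength,
  end: refinerStart !== undefined ? refinerStart : 1,
});

// denoisingWindow(0.75)      -> { start: 0.25, end: 1 }   (no refiner)
// denoisingWindow(0.75, 0.8) -> { start: 0.25, end: 0.8 } (refiner finishes 0.8 -> 1)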

View File

@ -1,487 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { addCoreMetadataNode } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
LATENTS_TO_IMAGE,
MASK_RESIZE_DOWN,
MASK_RESIZE_UP,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SDXL_CANVAS_INPAINT_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
 * Builds the Canvas tab's SDXL Inpaint graph.
 */
export const buildCanvasSDXLInpaintGraph = async (
state: RootState,
canvasInitImage: ImageDTO,
canvasMaskImage: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
img2imgStrength: strength,
seed,
vaePrecision,
shouldUseCpuNoise,
seamlessXAxis,
seamlessYAxis,
canvasCoherenceMode,
canvasCoherenceMinDenoise,
canvasCoherenceEdgeSize,
maskBlur,
} = state.canvasV2.params;
const { refinerModel, refinerStart } = state.canvasV2.params;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const is_intermediate = true;
const fp32 = vaePrecision === 'fp32';
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
// Construct Style Prompt
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
const graph: NonNullableGraph = {
id: SDXL_CANVAS_INPAINT_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
style: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
style: negativeStylePrompt,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
seed,
is_intermediate,
},
[INPAINT_CREATE_MASK]: {
type: 'create_gradient_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
coherence_mode: canvasCoherenceMode,
minimum_denoise: refinerModel ? Math.max(0.2, canvasCoherenceMinDenoise) : canvasCoherenceMinDenoise,
edge_radius: canvasCoherenceEdgeSize,
tiled: false,
        fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
        steps,
        cfg_scale,
        cfg_rescale_multiplier,
        scheduler,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
denoising_end: refinerModel ? refinerStart : 1,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'canvas_paste_back',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
mask_blur: maskBlur,
source_image: canvasInitImage,
},
},
edges: [
// Connect Model Loader to UNet and CLIP
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'unet',
},
},
// Connect Everything To Inpaint Node
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: INPAINT_IMAGE,
field: 'latents',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'denoise_mask',
},
},
// Decode Inpainted Latents To Image
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
// Handle Scale Before Processing
if (isUsingScaledDimensions) {
const scaledWidth: number = scaledBoundingBoxDimensions.width;
const scaledHeight: number = scaledBoundingBoxDimensions.height;
// Add Scaling Nodes
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
};
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasMaskImage,
};
    graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
      type: 'img_resize',
      id: INPAINT_IMAGE_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
    graph.nodes[MASK_RESIZE_DOWN] = {
      type: 'img_resize',
      id: MASK_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
(graph.nodes[NOISE] as Invocation<'noise'>).width = scaledWidth;
(graph.nodes[NOISE] as Invocation<'noise'>).height = scaledHeight;
// Connect Nodes
graph.edges.push(
// Scale Inpaint Image and Mask
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE,
field: 'image',
},
},
{
source: {
node_id: MASK_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Resize Down
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
},
// Paste Back
{
source: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
} else {
// Add Images To Nodes
(graph.nodes[NOISE] as Invocation<'noise'>).width = width;
(graph.nodes[NOISE] as Invocation<'noise'>).height = height;
graph.nodes[INPAINT_IMAGE] = {
...(graph.nodes[INPAINT_IMAGE] as Invocation<'i2l'>),
image: canvasInitImage,
};
graph.nodes[INPAINT_CREATE_MASK] = {
...(graph.nodes[INPAINT_CREATE_MASK] as Invocation<'create_gradient_mask'>),
mask: canvasMaskImage,
};
// Paste Back
graph.nodes[CANVAS_OUTPUT] = {
...(graph.nodes[CANVAS_OUTPUT] as Invocation<'canvas_paste_back'>),
mask: canvasMaskImage,
};
graph.edges.push({
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
});
}
addCoreMetadataNode(
graph,
{
generation_mode: 'sdxl_inpaint',
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
if (seamlessXAxis || seamlessYAxis) {
modelLoaderNodeId = SDXL_REFINER_SEAMLESS;
}
}
// Add VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
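// Editor's sketch (stand-in helper, not from the original file): the "scale before
// processing" branch above is a resize round trip - generate at the scaled size, then
// resize the decoded image and mask back down to the bounding box for paste-back.
const pickGenerationSize = (
  boundingBox: { width: number; height: number },
  scaled: { width: number; height: number },
  boundingBoxScaleMethod: string
): { width: number; height: number } =>
  ['auto', 'manual'].includes(boundingBoxScaleMethod) ? scaled : boundingBox;

// pickGenerationSize({ width: 512, height: 512 }, { width: 768, height: 768 }, 'auto')
// -> { width: 768, height: 768 }; NOISE runs at this size and the *_RESIZE_DOWN nodes
// restore 512x512 before CANVAS_OUTPUT.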

View File

@ -1,644 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { addCoreMetadataNode } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
INPAINT_CREATE_MASK,
INPAINT_IMAGE,
INPAINT_IMAGE_RESIZE_DOWN,
INPAINT_IMAGE_RESIZE_UP,
INPAINT_INFILL,
INPAINT_INFILL_RESIZE_DOWN,
LATENTS_TO_IMAGE,
MASK_COMBINE,
MASK_FROM_ALPHA,
MASK_RESIZE_DOWN,
MASK_RESIZE_UP,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SDXL_CANVAS_OUTPAINT_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
 * Builds the Canvas tab's SDXL Outpaint graph.
 */
export const buildCanvasSDXLOutpaintGraph = async (
state: RootState,
canvasInitImage: ImageDTO,
canvasMaskImage?: ImageDTO
): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
img2imgStrength: strength,
seed,
vaePrecision,
shouldUseCpuNoise,
infillTileSize,
infillPatchmatchDownscaleSize,
infillMethod,
// infillMosaicTileWidth,
// infillMosaicTileHeight,
// infillMosaicMinColor,
// infillMosaicMaxColor,
infillColorValue,
seamlessXAxis,
seamlessYAxis,
canvasCoherenceMode,
canvasCoherenceMinDenoise,
canvasCoherenceEdgeSize,
maskBlur,
} = state.canvasV2.params;
const { refinerModel, refinerStart } = state.canvasV2.params;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
let modelLoaderNodeId = SDXL_MODEL_LOADER;
const use_cpu = shouldUseCpuNoise;
// Construct Style Prompt
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
const graph: NonNullableGraph = {
id: SDXL_CANVAS_OUTPAINT_GRAPH,
nodes: {
      [modelLoaderNodeId]: {
        type: 'sdxl_model_loader',
        id: modelLoaderNodeId,
        model,
      },
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
style: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
style: negativeStylePrompt,
},
[MASK_FROM_ALPHA]: {
type: 'tomask',
id: MASK_FROM_ALPHA,
is_intermediate,
image: canvasInitImage,
},
[MASK_COMBINE]: {
type: 'mask_combine',
id: MASK_COMBINE,
is_intermediate,
mask2: canvasMaskImage,
},
[INPAINT_IMAGE]: {
type: 'i2l',
id: INPAINT_IMAGE,
is_intermediate,
fp32,
},
[NOISE]: {
type: 'noise',
id: NOISE,
use_cpu,
seed,
is_intermediate,
},
[INPAINT_CREATE_MASK]: {
type: 'create_gradient_mask',
id: INPAINT_CREATE_MASK,
is_intermediate,
coherence_mode: canvasCoherenceMode,
edge_radius: canvasCoherenceEdgeSize,
minimum_denoise: refinerModel ? Math.max(0.2, canvasCoherenceMinDenoise) : canvasCoherenceMinDenoise,
tiled: false,
        fp32,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
        steps,
        cfg_scale,
        cfg_rescale_multiplier,
        scheduler,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - strength) : 1 - strength,
denoising_end: refinerModel ? refinerStart : 1,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
is_intermediate,
fp32,
},
[CANVAS_OUTPUT]: {
type: 'canvas_paste_back',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
use_cache: false,
mask_blur: maskBlur,
},
},
edges: [
// Connect Model Loader To UNet and CLIP
{
source: {
          node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
          node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
          node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
          node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
          node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'unet',
},
},
// Connect Infill Result To Inpaint Image
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE,
field: 'image',
},
},
// Combine Mask from Init Image with User Painted Mask
{
source: {
node_id: MASK_FROM_ALPHA,
field: 'image',
},
destination: {
node_id: MASK_COMBINE,
field: 'mask1',
},
},
// Plug Everything Into Inpaint Node
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: INPAINT_IMAGE,
field: 'latents',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
},
// Create Inpaint Mask
{
source: {
node_id: isUsingScaledDimensions ? MASK_RESIZE_UP : MASK_COMBINE,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'mask',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'denoise_mask',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'denoise_mask',
},
},
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
],
};
// Add Infill Nodes
if (infillMethod === 'patchmatch') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_patchmatch',
id: INPAINT_INFILL,
is_intermediate,
downscale: infillPatchmatchDownscaleSize,
};
}
if (infillMethod === 'lama') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_lama',
id: INPAINT_INFILL,
is_intermediate,
};
}
if (infillMethod === 'cv2') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_cv2',
id: INPAINT_INFILL,
is_intermediate,
};
}
if (infillMethod === 'tile') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_tile',
id: INPAINT_INFILL,
is_intermediate,
tile_size: infillTileSize,
};
}
// TODO: add mosaic back
// if (infillMethod === 'mosaic') {
// graph.nodes[INPAINT_INFILL] = {
// type: 'infill_mosaic',
// id: INPAINT_INFILL,
// is_intermediate,
// tile_width: infillMosaicTileWidth,
// tile_height: infillMosaicTileHeight,
// min_color: infillMosaicMinColor,
// max_color: infillMosaicMaxColor,
// };
// }
if (infillMethod === 'color') {
graph.nodes[INPAINT_INFILL] = {
type: 'infill_rgba',
id: INPAINT_INFILL,
is_intermediate,
color: infillColorValue,
};
}
// Handle Scale Before Processing
if (isUsingScaledDimensions) {
const scaledWidth: number = scaledBoundingBoxDimensions.width;
const scaledHeight: number = scaledBoundingBoxDimensions.height;
// Add Scaling Nodes
graph.nodes[INPAINT_IMAGE_RESIZE_UP] = {
type: 'img_resize',
id: INPAINT_IMAGE_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
image: canvasInitImage,
};
graph.nodes[MASK_RESIZE_UP] = {
type: 'img_resize',
id: MASK_RESIZE_UP,
is_intermediate,
width: scaledWidth,
height: scaledHeight,
};
    graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = {
      type: 'img_resize',
      id: INPAINT_IMAGE_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
    graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = {
      type: 'img_resize',
      id: INPAINT_INFILL_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
    graph.nodes[MASK_RESIZE_DOWN] = {
      type: 'img_resize',
      id: MASK_RESIZE_DOWN,
      is_intermediate,
      width,
      height,
    };
(graph.nodes[NOISE] as Invocation<'noise'>).width = scaledWidth;
(graph.nodes[NOISE] as Invocation<'noise'>).height = scaledHeight;
// Connect Nodes
graph.edges.push(
// Scale Inpaint Image
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_INFILL,
field: 'image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_UP,
field: 'image',
},
destination: {
node_id: INPAINT_CREATE_MASK,
field: 'image',
},
},
// Take combined mask and resize
{
source: {
node_id: MASK_COMBINE,
field: 'image',
},
destination: {
node_id: MASK_RESIZE_UP,
field: 'image',
},
},
// Resize Results Down
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
},
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: INPAINT_INFILL_RESIZE_DOWN,
field: 'image',
},
},
// Paste Back
{
source: {
node_id: INPAINT_INFILL_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'source_image',
},
},
{
source: {
node_id: INPAINT_IMAGE_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: MASK_RESIZE_DOWN,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
} else {
// Add Images To Nodes
graph.nodes[INPAINT_INFILL] = {
...(graph.nodes[INPAINT_INFILL] as Invocation<'infill_tile'> | Invocation<'infill_patchmatch'>),
image: canvasInitImage,
};
(graph.nodes[NOISE] as Invocation<'noise'>).width = width;
(graph.nodes[NOISE] as Invocation<'noise'>).height = height;
graph.nodes[INPAINT_IMAGE] = {
...(graph.nodes[INPAINT_IMAGE] as Invocation<'i2l'>),
image: canvasInitImage,
};
graph.edges.push(
{
source: {
node_id: INPAINT_INFILL,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'source_image',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'target_image',
},
},
{
source: {
node_id: INPAINT_CREATE_MASK,
field: 'expanded_mask_area',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'mask',
},
}
);
}
addCoreMetadataNode(
graph,
{
generation_mode: 'sdxl_outpaint',
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
if (seamlessXAxis || seamlessYAxis) {
modelLoaderNodeId = SDXL_REFINER_SEAMLESS;
}
}
// Add VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
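// Editor's sketch (boolean model only; real masks are images): outpainting derives its
// mask in two steps, as wired above - `tomask` extracts a mask from the init image's
// alpha channel (transparent pixels become the region to fill) and `mask_combine`
// unions it with any user-painted mask.
const combineMasks = (alphaMask: boolean[], userMask: boolean[]): boolean[] =>
  alphaMask.map((fromAlpha, i) => fromAlpha || (userMask[i] ?? false));

// combineMasks([true, false, false], [false, false, true]) -> [true, false, true]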

View File

@ -1,341 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addCoreMetadataNode, getModelMetadataField } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
 * Builds the Canvas tab's SDXL Text to Image graph.
 */
export const buildCanvasSDXLTextToImageGraph = async (state: RootState): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
vaePrecision,
shouldUseCpuNoise,
seamlessXAxis,
seamlessYAxis,
} = state.canvasV2.params;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
const { refinerModel, refinerStart } = state.canvasV2.params;
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
const use_cpu = shouldUseCpuNoise;
let modelLoaderNodeId = SDXL_MODEL_LOADER;
// Construct Style Prompt
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only thing we need extra logic for is handling randomized seed, control net, and for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'sdxl_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[POSITIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
style: positiveStylePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
style: negativeStylePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
seed,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
use_cpu,
},
[SDXL_DENOISE_LATENTS]: {
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
is_intermediate,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: 0,
denoising_end: refinerModel ? refinerStart : 1,
},
},
edges: [
// Connect Model Loader to UNet and CLIP
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
},
// Connect everything to Denoise Latents
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: SDXL_DENOISE_LATENTS,
field: 'noise',
},
},
],
};
  // Decode Latents To Image & Handle Scale Before Processing
if (isUsingScaledDimensions) {
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
      width,
      height,
use_cache: false,
};
graph.edges.push(
{
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
fp32,
use_cache: false,
};
graph.edges.push({
source: {
node_id: SDXL_DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
addCoreMetadataNode(
graph,
{
generation_mode: 'txt2img',
cfg_scale,
cfg_rescale_multiplier,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
model: getModelMetadataField(modelConfig),
seed,
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
if (seamlessXAxis || seamlessYAxis) {
modelLoaderNodeId = SDXL_REFINER_SEAMLESS;
}
}
// add LoRA support
await addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId);
// optionally add custom VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, SDXL_DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
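// Editor's sketch (illustrative): in this builder (and its SD1.5 counterpart below),
// CANVAS_OUTPUT's node type depends on whether scaled dimensions are in use - a plain
// latents decode (`l2i`) when not, or an `img_resize` back to the bounding box fed by a
// separate LATENTS_TO_IMAGE decode when scaling.
const canvasOutputType = (isUsingScaledDimensions: boolean): 'img_resize' | 'l2i' =>
  isUsingScaledDimensions ? 'img_resize' : 'l2i';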

View File

@ -1,324 +0,0 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addCoreMetadataNode, getModelMetadataField } from 'features/nodes/util/graph/canvas/metadata';
import {
CANVAS_OUTPUT,
CANVAS_TEXT_TO_IMAGE_GRAPH,
CLIP_SKIP,
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addIPAdapterToLinearGraph } from './addIPAdapterToLinearGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSeamlessToLinearGraph } from './addSeamlessToLinearGraph';
import { addT2IAdaptersToLinearGraph } from './addT2IAdapterToLinearGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
/**
* Builds the Canvas tab's Text to Image graph.
*/
export const buildCanvasTextToImageGraph = async (state: RootState): Promise<NonNullableGraph> => {
const log = logger('nodes');
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
vaePrecision,
clipSkip,
shouldUseCpuNoise,
seamlessXAxis,
seamlessYAxis,
} = state.canvasV2.params;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const fp32 = vaePrecision === 'fp32';
const is_intermediate = true;
const isUsingScaledDimensions = ['auto', 'manual'].includes(boundingBoxScaleMethod);
if (!model) {
log.error('No model found in state');
throw new Error('No model found in state');
}
const use_cpu = shouldUseCpuNoise;
let modelLoaderNodeId = MAIN_MODEL_LOADER;
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only thing we need extra logic for is handling randomized seed, control net, and for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: CANVAS_TEXT_TO_IMAGE_GRAPH,
nodes: {
[modelLoaderNodeId]: {
type: 'main_model_loader',
id: modelLoaderNodeId,
is_intermediate,
model,
},
[CLIP_SKIP]: {
type: 'clip_skip',
id: CLIP_SKIP,
is_intermediate,
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
is_intermediate,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
is_intermediate,
prompt: negativePrompt,
},
[NOISE]: {
type: 'noise',
id: NOISE,
is_intermediate,
seed,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
use_cpu,
},
[DENOISE_LATENTS]: {
type: 'denoise_latents',
id: DENOISE_LATENTS,
is_intermediate,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: 0,
denoising_end: 1,
},
},
edges: [
// Connect Model Loader to UNet & CLIP Skip
{
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: CLIP_SKIP,
field: 'clip',
},
},
// Connect CLIP Skip to Conditioning
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: CLIP_SKIP,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
// Connect everything to Denoise Latents
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: DENOISE_LATENTS,
field: 'noise',
},
},
],
};
  // Decode Latents To Image & Handle Scale Before Processing
if (isUsingScaledDimensions) {
graph.nodes[LATENTS_TO_IMAGE] = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
is_intermediate,
fp32,
};
graph.nodes[CANVAS_OUTPUT] = {
id: CANVAS_OUTPUT,
type: 'img_resize',
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
      width,
      height,
use_cache: false,
};
graph.edges.push(
{
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: LATENTS_TO_IMAGE,
field: 'image',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'image',
},
}
);
} else {
graph.nodes[CANVAS_OUTPUT] = {
type: 'l2i',
id: CANVAS_OUTPUT,
is_intermediate: getIsIntermediate(state),
board: getBoardField(state),
fp32,
use_cache: false,
};
graph.edges.push({
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: CANVAS_OUTPUT,
field: 'latents',
},
});
}
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
addCoreMetadataNode(
graph,
{
generation_mode: 'txt2img',
cfg_scale,
cfg_rescale_multiplier,
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
height: !isUsingScaledDimensions ? height : scaledBoundingBoxDimensions.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: getModelMetadataField(modelConfig),
seed,
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
clip_skip: clipSkip,
_canvas_objects: state.canvas.layerState.objects,
},
CANVAS_OUTPUT
);
// Add Seamless To Graph
if (seamlessXAxis || seamlessYAxis) {
addSeamlessToLinearGraph(state, graph, modelLoaderNodeId);
modelLoaderNodeId = SEAMLESS;
}
// optionally add custom VAE
await addVAEToGraph(state, graph, modelLoaderNodeId);
// add LoRA support
await addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId);
// add controlnet, mutating `graph`
await addControlNetToLinearGraph(state, graph, DENOISE_LATENTS);
// Add IP Adapter
await addIPAdapterToLinearGraph(state, graph, DENOISE_LATENTS);
await addT2IAdaptersToLinearGraph(state, graph, DENOISE_LATENTS);
// NSFW & watermark - must be last thing added to graph
if (state.system.shouldUseNSFWChecker) {
// must add before watermarker!
addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT);
}
if (state.system.shouldUseWatermarker) {
// must add after nsfw checker!
addWatermarkerToGraph(state, graph, CANVAS_OUTPUT);
}
return graph;
};
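// Editor's sketch (illustrative arithmetic only): the SD1.5 builder above routes CLIP
// through a `clip_skip` node before both conditioning nodes, so prompt encoding reads
// hidden states from an earlier CLIP layer - roughly (totalLayers - skipped_layers).
const effectiveClipLayer = (totalLayers: number, skippedLayers: number): number =>
  Math.max(1, totalLayers - skippedLayers);

// effectiveClipLayer(12, 2) -> 10 (the common "CLIP skip 2" setting)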

View File

@ -1,66 +0,0 @@
import type { JSONObject } from 'common/types';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { METADATA } from 'features/nodes/util/graph/constants';
import type { AnyModelConfig, NonNullableGraph, S } from 'services/api/types';
export const addCoreMetadataNode = (
graph: NonNullableGraph,
metadata: Partial<S['CoreMetadataInvocation']>,
nodeId: string
): void => {
graph.nodes[METADATA] = {
id: METADATA,
type: 'core_metadata',
...metadata,
};
graph.edges.push({
source: {
node_id: METADATA,
field: 'metadata',
},
destination: {
node_id: nodeId,
field: 'metadata',
},
});
};
export const upsertMetadata = (
graph: NonNullableGraph,
metadata: Partial<S['CoreMetadataInvocation']> | JSONObject
): void => {
const metadataNode = graph.nodes[METADATA] as S['CoreMetadataInvocation'] | undefined;
if (!metadataNode) {
return;
}
Object.assign(metadataNode, metadata);
};
export const removeMetadata = (graph: NonNullableGraph, key: keyof S['CoreMetadataInvocation']): void => {
const metadataNode = graph.nodes[METADATA] as S['CoreMetadataInvocation'] | undefined;
if (!metadataNode) {
return;
}
delete metadataNode[key];
};
export const getHasMetadata = (graph: NonNullableGraph): boolean => {
const metadataNode = graph.nodes[METADATA] as S['CoreMetadataInvocation'] | undefined;
return Boolean(metadataNode);
};
export const getModelMetadataField = ({ key, hash, name, base, type }: AnyModelConfig): ModelIdentifierField => ({
key,
hash,
name,
base,
type,
});
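// Editor's usage sketch (stand-in values; not from the original file): these helpers
// maintain a single `core_metadata` node per graph, wired into a target node's
// `metadata` input. A typical flow, assuming the minimal literal below satisfies
// NonNullableGraph:
const demoGraph: NonNullableGraph = { id: 'demo', nodes: {}, edges: [] };
addCoreMetadataNode(demoGraph, { generation_mode: 'txt2img' }, 'canvas_output');
upsertMetadata(demoGraph, { seed: 123 }); // merges keys into the existing metadata node
removeMetadata(demoGraph, 'seed'); // deletes a single key
getHasMetadata(demoGraph); // -> true; the node itself remains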