feat(ui): update UI for new metadata

- Update for new routes
- Update model storage in state to be the `MainModelField` type instead of `string`, which simplifies a lot of model handling
- Update model-related code for the model `name` --> `model_name` rename
- Update linear graphs to use `MetadataAccumulator`
- Update `ImageMetadataViewer` UI
- Ensure all `recall` functions work (at least, the ones that are currently active)
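
For context, a minimal sketch of the structured model field now kept in state, contrasted with the old string id (the exact field names are an assumption inferred from the graph-builder diffs below):

```ts
// Assumed shape of the structured model field (inferred, not verbatim from this commit).
type MainModelField = {
  model_name: string;
  base_model: string; // e.g. 'sd-1' or 'sd-2'
};

// Before: state stored a plain string id that had to be mapped with helpers
// such as modelIdToMainModelField before it could be used in a graph.
// After: the structured field can be passed straight into the model loader
// and the metadata accumulator, e.g.:
const model: MainModelField = {
  model_name: 'stable-diffusion-v1-5', // hypothetical model name
  base_model: 'sd-1',
};
```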
psychedelicious
2023-07-13 01:18:32 +10:00
parent bddc04af96
commit a43c900961
39 changed files with 1060 additions and 669 deletions

View File

@ -1,94 +0,0 @@
import { RootState } from 'app/store/store';
import { getValidControlNets } from 'features/controlNet/util/getValidControlNets';
import { CollectInvocation, ControlNetInvocation } from 'services/api/types';
import { NonNullableGraph } from '../types/types';
import { CONTROL_NET_COLLECT } from './graphBuilders/constants';
export const addControlNetToLinearGraph = (
graph: NonNullableGraph,
baseNodeId: string,
state: RootState
): void => {
const { isEnabled: isControlNetEnabled, controlNets } = state.controlNet;
const validControlNets = getValidControlNets(controlNets);
if (isControlNetEnabled && Boolean(validControlNets.length)) {
if (validControlNets.length > 1) {
// We have multiple controlnets, add ControlNet collector
const controlNetIterateNode: CollectInvocation = {
id: CONTROL_NET_COLLECT,
type: 'collect',
};
graph.nodes[controlNetIterateNode.id] = controlNetIterateNode;
graph.edges.push({
source: { node_id: controlNetIterateNode.id, field: 'collection' },
destination: {
node_id: baseNodeId,
field: 'control',
},
});
}
validControlNets.forEach((controlNet) => {
const {
controlNetId,
controlImage,
processedControlImage,
beginStepPct,
endStepPct,
controlMode,
model,
processorType,
weight,
} = controlNet;
const controlNetNode: ControlNetInvocation = {
id: `control_net_${controlNetId}`,
type: 'controlnet',
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
control_mode: controlMode,
control_model: model as ControlNetInvocation['control_model'],
control_weight: weight,
};
if (processedControlImage && processorType !== 'none') {
// We've already processed the image in the app, so we can just use the processed image
controlNetNode.image = {
image_name: processedControlImage,
};
} else if (controlImage) {
// The control image is preprocessed
controlNetNode.image = {
image_name: controlImage,
};
} else {
// Skip ControlNets without an unprocessed image - should never happen if everything is working correctly
return;
}
graph.nodes[controlNetNode.id] = controlNetNode;
if (validControlNets.length > 1) {
// if we have multiple controlnets, link to the collector
graph.edges.push({
source: { node_id: controlNetNode.id, field: 'control' },
destination: {
node_id: CONTROL_NET_COLLECT,
field: 'item',
},
});
} else {
// otherwise, link directly to the base node
graph.edges.push({
source: { node_id: controlNetNode.id, field: 'control' },
destination: {
node_id: baseNodeId,
field: 'control',
},
});
}
});
}
};

View File

@ -1,40 +0,0 @@
import {
Edge,
ImageToImageInvocation,
InpaintInvocation,
IterateInvocation,
RandomRangeInvocation,
RangeInvocation,
TextToImageInvocation,
} from 'services/api/types';
export const buildEdges = (
baseNode: TextToImageInvocation | ImageToImageInvocation | InpaintInvocation,
rangeNode: RangeInvocation | RandomRangeInvocation,
iterateNode: IterateInvocation
): Edge[] => {
const edges: Edge[] = [
{
source: {
node_id: rangeNode.id,
field: 'collection',
},
destination: {
node_id: iterateNode.id,
field: 'collection',
},
},
{
source: {
node_id: iterateNode.id,
field: 'item',
},
destination: {
node_id: baseNode.id,
field: 'seed',
},
},
];
return edges;
};

View File

@ -0,0 +1,100 @@
import { RootState } from 'app/store/store';
import { getValidControlNets } from 'features/controlNet/util/getValidControlNets';
import { omit } from 'lodash-es';
import {
CollectInvocation,
ControlField,
ControlNetInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import { NonNullableGraph } from '../../types/types';
import { CONTROL_NET_COLLECT, METADATA_ACCUMULATOR } from './constants';
export const addControlNetToLinearGraph = (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): void => {
const { isEnabled: isControlNetEnabled, controlNets } = state.controlNet;
const validControlNets = getValidControlNets(controlNets);
const metadataAccumulator = graph.nodes[
METADATA_ACCUMULATOR
] as MetadataAccumulatorInvocation;
if (isControlNetEnabled && Boolean(validControlNets.length)) {
if (validControlNets.length) {
// Add a ControlNet collector; every valid ControlNet is routed through it
const controlNetIterateNode: CollectInvocation = {
id: CONTROL_NET_COLLECT,
type: 'collect',
};
graph.nodes[CONTROL_NET_COLLECT] = controlNetIterateNode;
graph.edges.push({
source: { node_id: CONTROL_NET_COLLECT, field: 'collection' },
destination: {
node_id: baseNodeId,
field: 'control',
},
});
validControlNets.forEach((controlNet) => {
const {
controlNetId,
controlImage,
processedControlImage,
beginStepPct,
endStepPct,
controlMode,
model,
processorType,
weight,
} = controlNet;
const controlNetNode: ControlNetInvocation = {
id: `control_net_${controlNetId}`,
type: 'controlnet',
begin_step_percent: beginStepPct,
end_step_percent: endStepPct,
control_mode: controlMode,
control_model: model as ControlNetInvocation['control_model'],
control_weight: weight,
};
if (processedControlImage && processorType !== 'none') {
// We've already processed the image in the app, so we can just use the processed image
controlNetNode.image = {
image_name: processedControlImage,
};
} else if (controlImage) {
// The control image is preprocessed
controlNetNode.image = {
image_name: controlImage,
};
} else {
// Skip ControlNets without an unprocessed image - should never happen if everything is working correctly
return;
}
graph.nodes[controlNetNode.id] = controlNetNode;
// metadata accumulator only needs a control field - not the whole node
// extract what we need and add to the accumulator
const controlField = omit(controlNetNode, [
'id',
'type',
]) as ControlField;
metadataAccumulator.controlnets.push(controlField);
graph.edges.push({
source: { node_id: controlNetNode.id, field: 'control' },
destination: {
node_id: CONTROL_NET_COLLECT,
field: 'item',
},
});
});
}
}
};
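
Note that a collector node is now added whenever at least one valid ControlNet exists (previously only when there were several), and each ControlNet also contributes one entry to the accumulator's `controlnets` list: the invocation with its graph-only `id`/`type` fields omitted. A rough sketch of one such entry, with illustrative values only:

```ts
// Illustrative only - the ControlField pushed into metadataAccumulator.controlnets
// is just the ControlNetInvocation minus 'id' and 'type':
const exampleControlField = {
  image: { image_name: 'processed-canny.png' }, // hypothetical image name
  control_model: 'canny',                       // hypothetical model identifier
  control_weight: 1,
  begin_step_percent: 0,
  end_step_percent: 1,
  control_mode: 'balanced',
};
```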

View File

@ -1,8 +1,10 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { unset } from 'lodash-es';
import {
DynamicPromptInvocation,
IterateInvocation,
MetadataAccumulatorInvocation,
NoiseInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
@ -10,16 +12,16 @@ import {
import {
DYNAMIC_PROMPT,
ITERATE,
METADATA_ACCUMULATOR,
NOISE,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
} from './constants';
import { unset } from 'lodash-es';
export const addDynamicPromptsToGraph = (
graph: NonNullableGraph,
state: RootState
state: RootState,
graph: NonNullableGraph
): void => {
const { positivePrompt, iterations, seed, shouldRandomizeSeed } =
state.generation;
@ -30,6 +32,10 @@ export const addDynamicPromptsToGraph = (
maxPrompts,
} = state.dynamicPrompts;
const metadataAccumulator = graph.nodes[
METADATA_ACCUMULATOR
] as MetadataAccumulatorInvocation;
if (isDynamicPromptsEnabled) {
// iteration is handled via dynamic prompts
unset(graph.nodes[POSITIVE_CONDITIONING], 'prompt');
@ -74,6 +80,18 @@ export const addDynamicPromptsToGraph = (
}
);
// hook up positive prompt to metadata
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: METADATA_ACCUMULATOR,
field: 'positive_prompt',
},
});
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
@ -88,11 +106,22 @@ export const addDynamicPromptsToGraph = (
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: NOISE, field: 'seed' },
});
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: METADATA_ACCUMULATOR, field: 'seed' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[NOISE] as NoiseInvocation).seed = seed;
// hook up seed to metadata
metadataAccumulator.seed = seed;
}
} else {
// no dynamic prompt - hook up positive prompt
metadataAccumulator.positive_prompt = positivePrompt;
const rangeOfSizeNode: RangeOfSizeInvocation = {
id: RANGE_OF_SIZE,
type: 'range_of_size',
@ -130,6 +159,18 @@ export const addDynamicPromptsToGraph = (
},
});
// hook up seed to metadata
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: METADATA_ACCUMULATOR,
field: 'seed',
},
});
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed

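As these hunks show, accumulator fields are populated in one of two ways: assigned statically when the value is known at build time, or routed in through a graph edge when another node produces the value at execution time. Condensed from the code above (same node and field names):

```ts
// 1) Value known at build time: set it on the accumulator node directly.
metadataAccumulator.seed = seed;

// 2) Value produced at runtime: wire the producing node into the accumulator.
graph.edges.push({
  source: { node_id: RANDOM_INT, field: 'a' },
  destination: { node_id: METADATA_ACCUMULATOR, field: 'seed' },
});
```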
View File

@ -1,19 +1,23 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { forEach, size } from 'lodash-es';
import { LoraLoaderInvocation } from 'services/api/types';
import {
LoraLoaderInvocation,
MetadataAccumulatorInvocation,
} from 'services/api/types';
import { modelIdToLoRAModelField } from '../modelIdToLoRAName';
import {
CLIP_SKIP,
LORA_LOADER,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
} from './constants';
export const addLoRAsToGraph = (
graph: NonNullableGraph,
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
): void => {
/**
@ -26,6 +30,9 @@ export const addLoRAsToGraph = (
const { loras } = state.lora;
const loraCount = size(loras);
const metadataAccumulator = graph.nodes[
METADATA_ACCUMULATOR
] as MetadataAccumulatorInvocation;
if (loraCount > 0) {
// Remove MAIN_MODEL_LOADER unet connection to feed it to LoRAs
@ -62,6 +69,10 @@ export const addLoRAsToGraph = (
weight,
};
// add the lora to the metadata accumulator
metadataAccumulator.loras.push({ lora: loraField, weight });
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
if (currentLoraIndex === 0) {

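Each active LoRA likewise lands in the accumulator as a `{ lora, weight }` pair. A sketch of a single entry; the exact LoRA field shape is an assumption:

```ts
// Assumed shape of one entry in metadataAccumulator.loras:
const exampleLoraEntry = {
  lora: { model_name: 'detail-tweaker', base_model: 'sd-1' }, // hypothetical ids
  weight: 0.75,
};
```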
View File

@ -1,5 +1,6 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { MetadataAccumulatorInvocation } from 'services/api/types';
import { modelIdToVAEModelField } from '../modelIdToVAEModelField';
import {
IMAGE_TO_IMAGE_GRAPH,
@ -8,18 +9,22 @@ import {
INPAINT_GRAPH,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
TEXT_TO_IMAGE_GRAPH,
VAE_LOADER,
} from './constants';
export const addVAEToGraph = (
graph: NonNullableGraph,
state: RootState
state: RootState,
graph: NonNullableGraph
): void => {
const { vae } = state.generation;
const vae_model = modelIdToVAEModelField(vae?.id || '');
const isAutoVae = !vae;
const metadataAccumulator = graph.nodes[
METADATA_ACCUMULATOR
] as MetadataAccumulatorInvocation;
if (!isAutoVae) {
graph.nodes[VAE_LOADER] = {
@ -67,4 +72,8 @@ export const addVAEToGraph = (
},
});
}
if (vae) {
metadataAccumulator.vae = vae_model;
}
};

View File

@ -7,8 +7,7 @@ import {
ImageResizeInvocation,
ImageToLatentsInvocation,
} from 'services/api/types';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
import { modelIdToMainModelField } from '../modelIdToMainModelField';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addVAEToGraph } from './addVAEToGraph';
@ -19,6 +18,7 @@ import {
LATENTS_TO_IMAGE,
LATENTS_TO_LATENTS,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -37,7 +37,7 @@ export const buildCanvasImageToImageGraph = (
const {
positivePrompt,
negativePrompt,
model: currentModel,
model,
cfgScale: cfg_scale,
scheduler,
steps,
@ -50,7 +50,10 @@ export const buildCanvasImageToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const model = modelIdToMainModelField(currentModel?.id || '');
if (!model) {
moduleLog.error('No model found in state');
throw new Error('No model found in state');
}
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
@ -275,16 +278,51 @@ export const buildCanvasImageToImageGraph = (
});
}
addLoRAsToGraph(graph, state, LATENTS_TO_LATENTS);
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
generation_mode: 'img2img',
cfg_scale,
height,
width,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
seed: 0, // set in addDynamicPromptsToGraph
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // optional; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
strength,
init_image: initialImage.image_name,
};
// Add VAE
addVAEToGraph(graph, state);
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// add dynamic prompts, mutating `graph`
addDynamicPromptsToGraph(graph, state);
// add LoRA support
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
// optionally add custom VAE
addVAEToGraph(state, graph);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state);
addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS);
return graph;
};
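
All of the graph helpers now share the same `(state, graph, ...)` argument order and mutate `graph` in place, so every builder wires them up in the same sequence. Condensed from the builders in this commit (the text-to-image builders pass `TEXT_TO_LATENTS` instead of `LATENTS_TO_LATENTS`):

```ts
// Helper call order used by the linear/canvas graph builders:
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);            // LoRA loaders + metadata.loras
addVAEToGraph(state, graph);                                  // optional custom VAE + metadata.vae
addDynamicPromptsToGraph(state, graph);                       // iteration, seed, metadata.seed / positive_prompt
addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS); // ControlNets + metadata.controlnets
```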

View File

@ -212,10 +212,10 @@ export const buildCanvasInpaintGraph = (
],
};
addLoRAsToGraph(graph, state, INPAINT);
addLoRAsToGraph(state, graph, INPAINT);
// Add VAE
addVAEToGraph(graph, state);
addVAEToGraph(state, graph);
// handle seed
if (shouldRandomizeSeed) {

View File

@ -1,8 +1,8 @@
import { log } from 'app/logging/useLogger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
import { modelIdToMainModelField } from '../modelIdToMainModelField';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addVAEToGraph } from './addVAEToGraph';
@ -10,6 +10,7 @@ import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -17,6 +18,8 @@ import {
TEXT_TO_LATENTS,
} from './constants';
const moduleLog = log.child({ namespace: 'nodes' });
/**
* Builds the Canvas tab's Text to Image graph.
*/
@ -26,7 +29,7 @@ export const buildCanvasTextToImageGraph = (
const {
positivePrompt,
negativePrompt,
model: currentModel,
model,
cfgScale: cfg_scale,
scheduler,
steps,
@ -38,7 +41,10 @@ export const buildCanvasTextToImageGraph = (
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
const model = modelIdToMainModelField(currentModel?.id || '');
if (!model) {
moduleLog.error('No model found in state');
throw new Error('No model found in state');
}
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
@ -180,16 +186,49 @@ export const buildCanvasTextToImageGraph = (
],
};
addLoRAsToGraph(graph, state, TEXT_TO_LATENTS);
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
generation_mode: 'txt2img',
cfg_scale,
height,
width,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
seed: 0, // set in addDynamicPromptsToGraph
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // optional; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
};
// Add VAE
addVAEToGraph(graph, state);
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// add dynamic prompts, mutating `graph`
addDynamicPromptsToGraph(graph, state);
// add LoRA support
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS);
// optionally add custom VAE
addVAEToGraph(state, graph);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state);
addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS);
return graph;
};

View File

@ -3,25 +3,21 @@ import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import {
ImageCollectionInvocation,
ImageResizeInvocation,
ImageToLatentsInvocation,
IterateInvocation,
} from 'services/api/types';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
import { modelIdToMainModelField } from '../modelIdToMainModelField';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import {
CLIP_SKIP,
IMAGE_COLLECTION,
IMAGE_COLLECTION_ITERATE,
IMAGE_TO_IMAGE_GRAPH,
IMAGE_TO_LATENTS,
LATENTS_TO_IMAGE,
LATENTS_TO_LATENTS,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -39,7 +35,7 @@ export const buildLinearImageToImageGraph = (
const {
positivePrompt,
negativePrompt,
model: currentModel,
model,
cfgScale: cfg_scale,
scheduler,
steps,
@ -53,14 +49,15 @@ export const buildLinearImageToImageGraph = (
shouldUseNoiseSettings,
} = state.generation;
const {
isEnabled: isBatchEnabled,
imageNames: batchImageNames,
asInitialImage,
} = state.batch;
// TODO: add batch functionality
// const {
// isEnabled: isBatchEnabled,
// imageNames: batchImageNames,
// asInitialImage,
// } = state.batch;
const shouldBatch =
isBatchEnabled && batchImageNames.length > 0 && asInitialImage;
// const shouldBatch =
// isBatchEnabled && batchImageNames.length > 0 && asInitialImage;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
@ -71,12 +68,15 @@ export const buildLinearImageToImageGraph = (
* the `fit` param. These are added to the graph at the end.
*/
if (!initialImage && !shouldBatch) {
if (!initialImage) {
moduleLog.error('No initial image found in state');
throw new Error('No initial image found in state');
}
const model = modelIdToMainModelField(currentModel?.id || '');
if (!model) {
moduleLog.error('No model found in state');
throw new Error('No model found in state');
}
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
@ -295,51 +295,87 @@ export const buildLinearImageToImageGraph = (
});
}
if (isBatchEnabled && asInitialImage && batchImageNames.length > 0) {
// we are going to connect an iterate up to the init image
delete (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image;
// TODO: add batch functionality
// if (isBatchEnabled && asInitialImage && batchImageNames.length > 0) {
// // we are going to connect an iterate up to the init image
// delete (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image;
const imageCollection: ImageCollectionInvocation = {
id: IMAGE_COLLECTION,
type: 'image_collection',
images: batchImageNames.map((image_name) => ({ image_name })),
};
// const imageCollection: ImageCollectionInvocation = {
// id: IMAGE_COLLECTION,
// type: 'image_collection',
// images: batchImageNames.map((image_name) => ({ image_name })),
// };
const imageCollectionIterate: IterateInvocation = {
id: IMAGE_COLLECTION_ITERATE,
type: 'iterate',
};
// const imageCollectionIterate: IterateInvocation = {
// id: IMAGE_COLLECTION_ITERATE,
// type: 'iterate',
// };
graph.nodes[IMAGE_COLLECTION] = imageCollection;
graph.nodes[IMAGE_COLLECTION_ITERATE] = imageCollectionIterate;
// graph.nodes[IMAGE_COLLECTION] = imageCollection;
// graph.nodes[IMAGE_COLLECTION_ITERATE] = imageCollectionIterate;
graph.edges.push({
source: { node_id: IMAGE_COLLECTION, field: 'collection' },
destination: {
node_id: IMAGE_COLLECTION_ITERATE,
field: 'collection',
},
});
// graph.edges.push({
// source: { node_id: IMAGE_COLLECTION, field: 'collection' },
// destination: {
// node_id: IMAGE_COLLECTION_ITERATE,
// field: 'collection',
// },
// });
graph.edges.push({
source: { node_id: IMAGE_COLLECTION_ITERATE, field: 'item' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
}
// graph.edges.push({
// source: { node_id: IMAGE_COLLECTION_ITERATE, field: 'item' },
// destination: {
// node_id: IMAGE_TO_LATENTS,
// field: 'image',
// },
// });
// }
addLoRAsToGraph(graph, state, LATENTS_TO_LATENTS);
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
generation_mode: 'img2img',
cfg_scale,
height,
width,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
seed: 0, // set in addDynamicPromptsToGraph
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // optional; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
strength,
init_image: initialImage.imageName,
};
// Add VAE
addVAEToGraph(graph, state);
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// add dynamic prompts, mutating `graph`
addDynamicPromptsToGraph(graph, state);
// add LoRA support
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
// optionally add custom VAE
addVAEToGraph(state, graph);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state);
addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS);
return graph;
};

View File

@ -1,8 +1,8 @@
import { log } from 'app/logging/useLogger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
import { modelIdToMainModelField } from '../modelIdToMainModelField';
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addVAEToGraph } from './addVAEToGraph';
@ -10,6 +10,7 @@ import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
@ -17,13 +18,15 @@ import {
TEXT_TO_LATENTS,
} from './constants';
const moduleLog = log.child({ namespace: 'nodes' });
export const buildLinearTextToImageGraph = (
state: RootState
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: currentModel,
model,
cfgScale: cfg_scale,
scheduler,
steps,
@ -34,12 +37,15 @@ export const buildLinearTextToImageGraph = (
shouldUseNoiseSettings,
} = state.generation;
const model = modelIdToMainModelField(currentModel?.id || '');
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
if (!model) {
moduleLog.error('No model found in state');
throw new Error('No model found in state');
}
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
@ -176,16 +182,49 @@ export const buildLinearTextToImageGraph = (
],
};
addLoRAsToGraph(graph, state, TEXT_TO_LATENTS);
// add metadata accumulator, which is only mostly populated - some fields are added later
graph.nodes[METADATA_ACCUMULATOR] = {
id: METADATA_ACCUMULATOR,
type: 'metadata_accumulator',
generation_mode: 'txt2img',
cfg_scale,
height,
width,
positive_prompt: '', // set in addDynamicPromptsToGraph
negative_prompt: negativePrompt,
model,
seed: 0, // set in addDynamicPromptsToGraph
steps,
rand_device: use_cpu ? 'cpu' : 'cuda',
scheduler,
vae: undefined, // optional; set in addVAEToGraph
controlnets: [], // populated in addControlNetToLinearGraph
loras: [], // populated in addLoRAsToGraph
clip_skip: clipSkip,
};
// Add Custom VAE Support
addVAEToGraph(graph, state);
graph.edges.push({
source: {
node_id: METADATA_ACCUMULATOR,
field: 'metadata',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'metadata',
},
});
// add dynamic prompts, mutating `graph`
addDynamicPromptsToGraph(graph, state);
// add LoRA support
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS);
// optionally add custom VAE
addVAEToGraph(state, graph);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
// add controlnet, mutating `graph`
addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state);
addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS);
return graph;
};
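
On the UI side, which is not shown in these hunks, the accumulated metadata travels with the finished image; that is what the updated `ImageMetadataViewer` and the `recall` functions consume. A purely hypothetical sketch of that consumer, only to indicate the direction of the data flow (type and function names here are assumptions, not code from this commit):

```ts
// Hypothetical consumer-side sketch - not this commit's code.
type CoreMetadata = {
  positive_prompt?: string;
  negative_prompt?: string;
  seed?: number;
  cfg_scale?: number;
  steps?: number;
  scheduler?: string;
  model?: { model_name: string; base_model: string };
};

// A recall handler would validate each field before pushing it back into state,
// skipping anything missing or malformed.
const recallParameters = (metadata: CoreMetadata) => {
  const recalled: Partial<CoreMetadata> = {};
  if (typeof metadata.seed === 'number') recalled.seed = metadata.seed;
  if (metadata.positive_prompt) recalled.positive_prompt = metadata.positive_prompt;
  if (metadata.model?.model_name) recalled.model = metadata.model;
  return recalled; // would then be dispatched into the parameters slice
};
```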

View File

@ -19,6 +19,7 @@ export const CONTROL_NET_COLLECT = 'control_net_collect';
export const DYNAMIC_PROMPT = 'dynamic_prompt';
export const IMAGE_COLLECTION = 'image_collection';
export const IMAGE_COLLECTION_ITERATE = 'image_collection_iterate';
export const METADATA_ACCUMULATOR = 'metadata_accumulator';
// friendly graph ids
export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph';

View File

@ -5,17 +5,21 @@ import {
InputFieldTemplate,
InvocationSchemaObject,
InvocationTemplate,
isInvocationSchemaObject,
OutputFieldTemplate,
isInvocationSchemaObject,
} from '../types/types';
import {
buildInputFieldTemplate,
buildOutputFieldTemplates,
} from './fieldTemplateBuilders';
const RESERVED_FIELD_NAMES = ['id', 'type', 'is_intermediate'];
const RESERVED_FIELD_NAMES = ['id', 'type', 'is_intermediate', 'core_metadata'];
const invocationDenylist = ['Graph', 'InvocationMeta'];
const invocationDenylist = [
'Graph',
'InvocationMeta',
'MetadataAccumulatorInvocation',
];
export const parseSchema = (openAPI: OpenAPIV3.Document) => {
// filter out non-invocation schemas, plus some tricky invocations for now
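
The filter body itself falls outside this hunk, but presumably the two lists are applied as simple predicates when walking the OpenAPI schemas: denylisted invocation schemas are dropped entirely, and reserved field names are never turned into user-editable inputs. A hedged sketch of that usage (helper names are hypothetical):

```ts
// Hypothetical predicates built from the lists above - the actual filtering
// logic lives outside this hunk.
const isDeniedInvocation = (schemaName: string): boolean =>
  invocationDenylist.some((denied) => schemaName.includes(denied));

const isReservedFieldName = (fieldName: string): boolean =>
  RESERVED_FIELD_NAMES.includes(fieldName);
```

With `MetadataAccumulatorInvocation` denylisted and `core_metadata` reserved, the new metadata plumbing presumably stays out of the node editor's user-facing templates.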