Merge branch 'main' into model-manager-ui-30

This commit is contained in:
blessedcoolant
2023-06-19 23:02:59 +12:00
59 changed files with 1909 additions and 3688 deletions

View File

@@ -1,11 +1,10 @@
import { startAppListening } from '..';
import { sessionCreated } from 'services/thunks/session';
import { buildCanvasGraphComponents } from 'features/nodes/util/graphBuilders/buildCanvasGraph';
import { buildCanvasGraph } from 'features/nodes/util/graphBuilders/buildCanvasGraph';
import { log } from 'app/logging/useLogger';
import { canvasGraphBuilt } from 'features/nodes/store/actions';
import { imageUpdated, imageUploaded } from 'services/thunks/image';
import { v4 as uuidv4 } from 'uuid';
import { Graph } from 'services/api';
import { ImageDTO } from 'services/api';
import {
canvasSessionIdChanged,
stagingAreaInitialized,
@@ -67,112 +66,106 @@ export const addUserInvokedCanvasListener = () => {
moduleLog.debug(`Generation mode: ${generationMode}`);
// Build the canvas graph
const graphComponents = await buildCanvasGraphComponents(
state,
generationMode
);
// Temp placeholders for the init and mask images
let canvasInitImage: ImageDTO | undefined;
let canvasMaskImage: ImageDTO | undefined;
if (!graphComponents) {
moduleLog.error('Problem building graph');
return;
}
const { rangeNode, iterateNode, baseNode, edges } = graphComponents;
// Assemble! Note that this graph *does not have the init or mask image set yet!*
const nodes: Graph['nodes'] = {
[rangeNode.id]: rangeNode,
[iterateNode.id]: iterateNode,
[baseNode.id]: baseNode,
};
const graph = { nodes, edges };
dispatch(canvasGraphBuilt(graph));
moduleLog.debug({ data: graph }, 'Canvas graph built');
// If we are generating img2img or inpaint, we need to upload the init images
if (baseNode.type === 'img2img' || baseNode.type === 'inpaint') {
const baseFilename = `${uuidv4()}.png`;
dispatch(
// For img2img and inpaint/outpaint, we need to upload the init images
if (['img2img', 'inpaint', 'outpaint'].includes(generationMode)) {
// upload the image, saving the request id
const { requestId: initImageUploadedRequestId } = dispatch(
imageUploaded({
formData: {
file: new File([baseBlob], baseFilename, { type: 'image/png' }),
file: new File([baseBlob], 'canvasInitImage.png', {
type: 'image/png',
}),
},
imageCategory: 'general',
isIntermediate: true,
})
);
// Wait for the image to be uploaded
const [{ payload: baseImageDTO }] = await take(
// Wait for the image to be uploaded, matching by request id
const [{ payload }] = await take(
(action): action is ReturnType<typeof imageUploaded.fulfilled> =>
imageUploaded.fulfilled.match(action) &&
action.meta.arg.formData.file.name === baseFilename
action.meta.requestId === initImageUploadedRequestId
);
// Update the base node with the image name and type
baseNode.image = {
image_name: baseImageDTO.image_name,
};
canvasInitImage = payload;
}
// For inpaint, we also need to upload the mask layer
if (baseNode.type === 'inpaint') {
const maskFilename = `${uuidv4()}.png`;
dispatch(
// For inpaint/outpaint, we also need to upload the mask layer
if (['inpaint', 'outpaint'].includes(generationMode)) {
// upload the image, saving the request id
const { requestId: maskImageUploadedRequestId } = dispatch(
imageUploaded({
formData: {
file: new File([maskBlob], maskFilename, { type: 'image/png' }),
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
},
imageCategory: 'mask',
isIntermediate: true,
})
);
// Wait for the mask to be uploaded
const [{ payload: maskImageDTO }] = await take(
// Wait for the image to be uploaded, matching by request id
const [{ payload }] = await take(
(action): action is ReturnType<typeof imageUploaded.fulfilled> =>
imageUploaded.fulfilled.match(action) &&
action.meta.arg.formData.file.name === maskFilename
action.meta.requestId === maskImageUploadedRequestId
);
// Update the base node with the image name and type
baseNode.mask = {
image_name: maskImageDTO.image_name,
};
canvasMaskImage = payload;
}
// Create the session and wait for response
dispatch(sessionCreated({ graph }));
const [sessionCreatedAction] = await take(sessionCreated.fulfilled.match);
const graph = buildCanvasGraph(
state,
generationMode,
canvasInitImage,
canvasMaskImage
);
moduleLog.debug({ graph }, `Canvas graph built`);
// currently this action is just listened to for logging
dispatch(canvasGraphBuilt(graph));
// Create the session, store the request id
const { requestId: sessionCreatedRequestId } = dispatch(
sessionCreated({ graph })
);
// Take the session created action, matching by its request id
const [sessionCreatedAction] = await take(
(action): action is ReturnType<typeof sessionCreated.fulfilled> =>
sessionCreated.fulfilled.match(action) &&
action.meta.requestId === sessionCreatedRequestId
);
const sessionId = sessionCreatedAction.payload.id;
// Associate the init image with the session, now that we have the session ID
if (
(baseNode.type === 'img2img' || baseNode.type === 'inpaint') &&
baseNode.image
) {
if (['img2img', 'inpaint'].includes(generationMode) && canvasInitImage) {
dispatch(
imageUpdated({
imageName: baseNode.image.image_name,
imageName: canvasInitImage.image_name,
requestBody: { session_id: sessionId },
})
);
}
// Associate the mask image with the session, now that we have the session ID
if (baseNode.type === 'inpaint' && baseNode.mask) {
if (['inpaint'].includes(generationMode) && canvasMaskImage) {
dispatch(
imageUpdated({
imageName: baseNode.mask.image_name,
imageName: canvasMaskImage.image_name,
requestBody: { session_id: sessionId },
})
);
}
// Prep the canvas staging area if it is not yet initialized
if (!state.canvas.layerState.stagingArea.boundingBox) {
dispatch(
stagingAreaInitialized({

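Both upload waits above rely on the same Redux Toolkit mechanics: dispatching an async thunk returns an object carrying the `requestId` RTK generated for that call, and every lifecycle action of that call carries the same id in `action.meta.requestId`. Matching on the request id (rather than on a generated filename, as before) is what lets the listener wait for exactly its own upload. A minimal sketch of the pattern in isolation; `uploadImage` is a hypothetical stand-in for the `imageUploaded` thunk:

import { createAsyncThunk, createListenerMiddleware } from '@reduxjs/toolkit';

// Hypothetical stand-in for the `imageUploaded` thunk in this diff.
const uploadImage = createAsyncThunk('images/upload', async (file: File) => {
  // ... POST the file, return the resulting DTO ...
  return { image_name: file.name };
});

const listenerMiddleware = createListenerMiddleware();

listenerMiddleware.startListening({
  predicate: (action) => action.type === 'app/userInvoked',
  effect: async (_action, { dispatch, take }) => {
    // dispatch() returns the thunk promise, which exposes the requestId
    const { requestId } = dispatch(
      uploadImage(new File([], 'canvasInitImage.png'))
    );

    // Wait for *this* upload to fulfill, ignoring any concurrent uploads
    const [{ payload }] = await take(
      (action): action is ReturnType<typeof uploadImage.fulfilled> =>
        uploadImage.fulfilled.match(action) &&
        action.meta.requestId === requestId
    );

    console.log('uploaded', payload.image_name);
  },
});
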
View File

@@ -1,10 +1,10 @@
import { startAppListening } from '..';
import { buildImageToImageGraph } from 'features/nodes/util/graphBuilders/buildImageToImageGraph';
import { sessionCreated } from 'services/thunks/session';
import { log } from 'app/logging/useLogger';
import { imageToImageGraphBuilt } from 'features/nodes/store/actions';
import { userInvoked } from 'app/store/actions';
import { sessionReadyToInvoke } from 'features/system/store/actions';
import { buildLinearImageToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearImageToImageGraph';
const moduleLog = log.child({ namespace: 'invoke' });
@@ -15,7 +15,7 @@ export const addUserInvokedImageToImageListener = () => {
effect: async (action, { getState, dispatch, take }) => {
const state = getState();
const graph = buildImageToImageGraph(state);
const graph = buildLinearImageToImageGraph(state);
dispatch(imageToImageGraphBuilt(graph));
moduleLog.debug({ data: graph }, 'Image to Image graph built');

View File

@@ -1,10 +1,10 @@
import { startAppListening } from '..';
import { buildTextToImageGraph } from 'features/nodes/util/graphBuilders/buildTextToImageGraph';
import { sessionCreated } from 'services/thunks/session';
import { log } from 'app/logging/useLogger';
import { textToImageGraphBuilt } from 'features/nodes/store/actions';
import { userInvoked } from 'app/store/actions';
import { sessionReadyToInvoke } from 'features/system/store/actions';
import { buildLinearTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearTextToImageGraph';
const moduleLog = log.child({ namespace: 'invoke' });
@@ -15,7 +15,7 @@ export const addUserInvokedTextToImageListener = () => {
effect: async (action, { getState, dispatch, take }) => {
const state = getState();
const graph = buildTextToImageGraph(state);
const graph = buildLinearTextToImageGraph(state);
dispatch(textToImageGraphBuilt(graph));

View File

@@ -2,8 +2,7 @@ import { RootState } from 'app/store/store';
import { filter, forEach, size } from 'lodash-es';
import { CollectInvocation, ControlNetInvocation } from 'services/api';
import { NonNullableGraph } from '../types/types';
const CONTROL_NET_COLLECT = 'control_net_collect';
import { CONTROL_NET_COLLECT } from './graphBuilders/constants';
export const addControlNetToLinearGraph = (
graph: NonNullableGraph,
@@ -37,7 +36,7 @@ export const addControlNetToLinearGraph = (
});
}
forEach(controlNets, (controlNet, index) => {
forEach(controlNets, (controlNet) => {
const {
controlNetId,
isEnabled,

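The hunk above is truncated, so the collect-node wiring that `CONTROL_NET_COLLECT` names is not visible here. As a rough sketch of the technique, under these assumptions: a `collect` invocation aggregates its `item` inputs into a `collection` output, `baseNodeId` is the denoising node passed by the caller, and the per-node id scheme is illustrative (the `CollectInvocation`, `ControlNetInvocation`, and `forEach` names come from the imports shown above):

// Sketch only: fan each ControlNet's output into a single collect node,
// then feed the collected list into the base node's `control` input.
const collectNode: CollectInvocation = {
  id: CONTROL_NET_COLLECT,
  type: 'collect',
};
graph.nodes[CONTROL_NET_COLLECT] = collectNode;

graph.edges.push({
  source: { node_id: CONTROL_NET_COLLECT, field: 'collection' },
  destination: { node_id: baseNodeId, field: 'control' },
});

forEach(controlNets, (controlNet) => {
  const controlNetNode: ControlNetInvocation = {
    id: `control_net_${controlNet.controlNetId}`, // illustrative id scheme
    type: 'controlnet',
    // ... model, weight, begin/end step percent, control image ...
  };
  graph.nodes[controlNetNode.id] = controlNetNode;
  graph.edges.push({
    source: { node_id: controlNetNode.id, field: 'control' },
    destination: { node_id: CONTROL_NET_COLLECT, field: 'item' },
  });
});
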
View File

@@ -1,116 +1,39 @@
import { RootState } from 'app/store/store';
import {
Edge,
ImageToImageInvocation,
InpaintInvocation,
IterateInvocation,
RandomRangeInvocation,
RangeInvocation,
TextToImageInvocation,
} from 'services/api';
import { buildImg2ImgNode } from '../nodeBuilders/buildImageToImageNode';
import { buildTxt2ImgNode } from '../nodeBuilders/buildTextToImageNode';
import { buildRangeNode } from '../nodeBuilders/buildRangeNode';
import { buildIterateNode } from '../nodeBuilders/buildIterateNode';
import { buildEdges } from '../edgeBuilders/buildEdges';
import { ImageDTO } from 'services/api';
import { log } from 'app/logging/useLogger';
import { buildInpaintNode } from '../nodeBuilders/buildInpaintNode';
import { forEach } from 'lodash-es';
import { buildCanvasInpaintGraph } from './buildCanvasInpaintGraph';
import { NonNullableGraph } from 'features/nodes/types/types';
import { buildCanvasImageToImageGraph } from './buildCanvasImageToImageGraph';
import { buildCanvasTextToImageGraph } from './buildCanvasTextToImageGraph';
const moduleLog = log.child({ namespace: 'nodes' });
const buildBaseNode = (
nodeType: 'txt2img' | 'img2img' | 'inpaint' | 'outpaint',
state: RootState
):
| TextToImageInvocation
| ImageToImageInvocation
| InpaintInvocation
| undefined => {
const overrides = {
...state.canvas.boundingBoxDimensions,
is_intermediate: true,
};
if (nodeType === 'txt2img') {
return buildTxt2ImgNode(state, overrides);
}
if (nodeType === 'img2img') {
return buildImg2ImgNode(state, overrides);
}
if (nodeType === 'inpaint' || nodeType === 'outpaint') {
return buildInpaintNode(state, overrides);
}
};
/**
* Builds the Canvas workflow graph and image blobs.
*/
export const buildCanvasGraphComponents = async (
export const buildCanvasGraph = (
state: RootState,
generationMode: 'txt2img' | 'img2img' | 'inpaint' | 'outpaint'
): Promise<
| {
rangeNode: RangeInvocation | RandomRangeInvocation;
iterateNode: IterateInvocation;
baseNode:
| TextToImageInvocation
| ImageToImageInvocation
| InpaintInvocation;
edges: Edge[];
}
| undefined
> => {
// The base node is a txt2img, img2img or inpaint node
const baseNode = buildBaseNode(generationMode, state);
generationMode: 'txt2img' | 'img2img' | 'inpaint' | 'outpaint',
canvasInitImage: ImageDTO | undefined,
canvasMaskImage: ImageDTO | undefined
) => {
let graph: NonNullableGraph;
if (!baseNode) {
moduleLog.error('Problem building base node');
return;
if (generationMode === 'txt2img') {
graph = buildCanvasTextToImageGraph(state);
} else if (generationMode === 'img2img') {
if (!canvasInitImage) {
throw new Error('Missing canvas init image');
}
graph = buildCanvasImageToImageGraph(state, canvasInitImage);
} else {
if (!canvasInitImage || !canvasMaskImage) {
throw new Error('Missing canvas init and mask images');
}
graph = buildCanvasInpaintGraph(state, canvasInitImage, canvasMaskImage);
}
if (baseNode.type === 'inpaint') {
const {
seamSize,
seamBlur,
seamSteps,
seamStrength,
tileSize,
infillMethod,
} = state.generation;
forEach(graph.nodes, (node) => {
graph.nodes[node.id].is_intermediate = true;
});
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } =
state.canvas;
if (boundingBoxScaleMethod !== 'none') {
baseNode.inpaint_width = scaledBoundingBoxDimensions.width;
baseNode.inpaint_height = scaledBoundingBoxDimensions.height;
}
baseNode.seam_size = seamSize;
baseNode.seam_blur = seamBlur;
baseNode.seam_strength = seamStrength;
baseNode.seam_steps = seamSteps;
baseNode.infill_method = infillMethod as InpaintInvocation['infill_method'];
if (infillMethod === 'tile') {
baseNode.tile_size = tileSize;
}
}
// We always use range and iterate nodes, no matter the iteration count
// This is required to provide the correct seeds to the backend engine
const rangeNode = buildRangeNode(state);
const iterateNode = buildIterateNode();
// Build the edges for the nodes selected.
const edges = buildEdges(baseNode, rangeNode, iterateNode);
return {
rangeNode,
iterateNode,
baseNode,
edges,
};
return graph;
};

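The new `buildCanvasGraph` is a synchronous dispatcher over the three mode-specific builders, with the image uploads hoisted into the listener shown earlier. A usage sketch (values illustrative):

// After the listener has uploaded the blobs and received the DTOs:
const graph = buildCanvasGraph(state, 'inpaint', canvasInitImage, canvasMaskImage);
// Throws if the images the mode requires are missing: 'txt2img' needs
// neither DTO, 'img2img' needs the init image, and 'inpaint'/'outpaint'
// need both.
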
View File

@@ -0,0 +1,331 @@
import { RootState } from 'app/store/store';
import {
ImageDTO,
ImageResizeInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
} from 'services/api';
import { NonNullableGraph } from 'features/nodes/types/types';
import { log } from 'app/logging/useLogger';
import {
ITERATE,
LATENTS_TO_IMAGE,
MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
IMAGE_TO_IMAGE_GRAPH,
IMAGE_TO_LATENTS,
LATENTS_TO_LATENTS,
RESIZE,
} from './constants';
import { set } from 'lodash-es';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
const moduleLog = log.child({ namespace: 'nodes' });
/**
* Builds the Canvas tab's Image to Image graph.
*/
export const buildCanvasImageToImageGraph = (
state: RootState,
initialImage: ImageDTO
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: model_name,
cfgScale: cfg_scale,
scheduler,
steps,
img2imgStrength: strength,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only things we need extra logic for are the randomized seed, control net, and, for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: IMAGE_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
// seed - must be connected manually
// start: 0,
size: iterations,
step: 1,
},
[NOISE]: {
type: 'noise',
id: NOISE,
},
[MODEL_LOADER]: {
type: 'sd1_model_loader',
id: MODEL_LOADER,
model_name,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
},
[ITERATE]: {
type: 'iterate',
id: ITERATE,
},
[LATENTS_TO_LATENTS]: {
type: 'l2l',
id: LATENTS_TO_LATENTS,
cfg_scale,
scheduler,
steps,
strength,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
// must be set manually later, because the `fit` parameter may require a resize node to be inserted
// image: {
// image_name: initialImage.image_name,
// },
},
},
edges: [
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
},
{
source: {
node_id: RANGE_OF_SIZE,
field: 'collection',
},
destination: {
node_id: ITERATE,
field: 'collection',
},
},
{
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
},
{
source: {
node_id: LATENTS_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: IMAGE_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'latents',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'vae',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'positive_conditioning',
},
},
],
};
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
graph.nodes[RANDOM_INT] = randomIntNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// handle `fit`
if (initialImage.width !== width || initialImage.height !== height) {
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
// Create a resize node, explicitly setting its image
const resizeNode: ImageResizeInvocation = {
id: RESIZE,
type: 'img_resize',
image: {
image_name: initialImage.image_name,
},
is_intermediate: true,
width,
height,
};
graph.nodes[RESIZE] = resizeNode;
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
graph.edges.push({
source: { node_id: RESIZE, field: 'image' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
// The `RESIZE` node also passes its width and height to `NOISE`
graph.edges.push({
source: { node_id: RESIZE, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: RESIZE, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
} else {
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
set(graph.nodes[IMAGE_TO_LATENTS], 'image', {
image_name: initialImage.image_name,
});
// Pass the image's dimensions to the `NOISE` node
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
}
// add controlnet
addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state);
return graph;
};

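The `// handle seed` block above is repeated verbatim in each of the new builders. Concretely, with shouldRandomizeSeed=false, seed=42 and iterations=3, RANGE_OF_SIZE emits the collection [42, 43, 44] and ITERATE feeds each value into NOISE's `seed`, so the session runs once per seed. A sketch of the repeated block factored into a helper; the name `addSeedBehaviour` is hypothetical, not part of this commit:

import { RandomIntInvocation, RangeOfSizeInvocation } from 'services/api';
import { NonNullableGraph } from 'features/nodes/types/types';
import { RANDOM_INT, RANGE_OF_SIZE } from './constants';

// Hypothetical helper: the repeated `// handle seed` block, extracted.
const addSeedBehaviour = (
  graph: NonNullableGraph,
  seed: number,
  shouldRandomizeSeed: boolean
): void => {
  if (shouldRandomizeSeed) {
    // Random int node to generate the starting seed
    const randomIntNode: RandomIntInvocation = {
      id: RANDOM_INT,
      type: 'rand_int',
    };
    graph.nodes[RANDOM_INT] = randomIntNode;
    // Connect random int to the start of the range of size so the range
    // starts on the random first seed
    graph.edges.push({
      source: { node_id: RANDOM_INT, field: 'a' },
      destination: { node_id: RANGE_OF_SIZE, field: 'start' },
    });
  } else {
    // User specified seed, so set the start of the range of size to the seed
    (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
  }
};
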
View File

@@ -0,0 +1,224 @@
import { RootState } from 'app/store/store';
import {
ImageDTO,
InpaintInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
} from 'services/api';
import { NonNullableGraph } from 'features/nodes/types/types';
import { log } from 'app/logging/useLogger';
import {
ITERATE,
MODEL_LOADER,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
INPAINT_GRAPH,
INPAINT,
} from './constants';
const moduleLog = log.child({ namespace: 'nodes' });
/**
* Builds the Canvas tab's Inpaint graph.
*/
export const buildCanvasInpaintGraph = (
state: RootState,
canvasInitImage: ImageDTO,
canvasMaskImage: ImageDTO
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: model_name,
cfgScale: cfg_scale,
scheduler,
steps,
img2imgStrength: strength,
shouldFitToWidthHeight,
iterations,
seed,
shouldRandomizeSeed,
seamSize,
seamBlur,
seamSteps,
seamStrength,
tileSize,
infillMethod,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const graph: NonNullableGraph = {
id: INPAINT_GRAPH,
nodes: {
[INPAINT]: {
type: 'inpaint',
id: INPAINT,
steps,
width,
height,
cfg_scale,
scheduler,
image: {
image_name: canvasInitImage.image_name,
},
strength,
fit: shouldFitToWidthHeight,
mask: {
image_name: canvasMaskImage.image_name,
},
seam_size: seamSize,
seam_blur: seamBlur,
seam_strength: seamStrength,
seam_steps: seamSteps,
tile_size: infillMethod === 'tile' ? tileSize : undefined,
infill_method: infillMethod as InpaintInvocation['infill_method'],
inpaint_width:
boundingBoxScaleMethod !== 'none'
? scaledBoundingBoxDimensions.width
: undefined,
inpaint_height:
boundingBoxScaleMethod !== 'none'
? scaledBoundingBoxDimensions.height
: undefined,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[MODEL_LOADER]: {
type: 'sd1_model_loader',
id: MODEL_LOADER,
model_name,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
// seed - must be connected manually
// start: 0,
size: iterations,
step: 1,
},
[ITERATE]: {
type: 'iterate',
id: ITERATE,
},
},
edges: [
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: INPAINT,
field: 'negative_conditioning',
},
},
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: INPAINT,
field: 'positive_conditioning',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: INPAINT,
field: 'unet',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: INPAINT,
field: 'vae',
},
},
{
source: {
node_id: RANGE_OF_SIZE,
field: 'collection',
},
destination: {
node_id: ITERATE,
field: 'collection',
},
},
{
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: INPAINT,
field: 'seed',
},
},
],
};
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
graph.nodes[RANDOM_INT] = randomIntNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
return graph;
};

View File

@@ -0,0 +1,224 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { RandomIntInvocation, RangeOfSizeInvocation } from 'services/api';
import {
ITERATE,
LATENTS_TO_IMAGE,
MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
TEXT_TO_IMAGE_GRAPH,
TEXT_TO_LATENTS,
} from './constants';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
/**
* Builds the Canvas tab's Text to Image graph.
*/
export const buildCanvasTextToImageGraph = (
state: RootState
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: model_name,
cfgScale: cfg_scale,
scheduler,
steps,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only things we need extra logic for are the randomized seed, control net, and, for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: TEXT_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
// start: 0, // seed - must be connected manually
size: iterations,
step: 1,
},
[NOISE]: {
type: 'noise',
id: NOISE,
width,
height,
},
[TEXT_TO_LATENTS]: {
type: 't2l',
id: TEXT_TO_LATENTS,
cfg_scale,
scheduler,
steps,
},
[MODEL_LOADER]: {
type: 'sd1_model_loader',
id: MODEL_LOADER,
model_name,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
},
[ITERATE]: {
type: 'iterate',
id: ITERATE,
},
},
edges: [
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: TEXT_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
},
{
source: {
node_id: RANGE_OF_SIZE,
field: 'collection',
},
destination: {
node_id: ITERATE,
field: 'collection',
},
},
{
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
},
],
};
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
graph.nodes[RANDOM_INT] = randomIntNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// add controlnet
addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state);
return graph;
};

View File

@@ -1,465 +0,0 @@
import { log } from 'app/logging/useLogger';
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { set } from 'lodash-es';
import {
CompelInvocation,
Graph,
ImageResizeInvocation,
ImageToLatentsInvocation,
IterateInvocation,
LatentsToImageInvocation,
LatentsToLatentsInvocation,
NoiseInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
SD1ModelLoaderInvocation,
SD2ModelLoaderInvocation,
} from 'services/api';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
const moduleLog = log.child({ namespace: 'nodes' });
const MODEL_LOADER = 'model_loader';
const POSITIVE_CONDITIONING = 'positive_conditioning';
const NEGATIVE_CONDITIONING = 'negative_conditioning';
const IMAGE_TO_LATENTS = 'image_to_latents';
const LATENTS_TO_LATENTS = 'latents_to_latents';
const LATENTS_TO_IMAGE = 'latents_to_image';
const RESIZE = 'resize_image';
const NOISE = 'noise';
const RANDOM_INT = 'rand_int';
const RANGE_OF_SIZE = 'range_of_size';
const ITERATE = 'iterate';
/**
* Builds the Image to Image tab graph.
*/
export const buildImageToImageGraph = (state: RootState): Graph => {
const {
positivePrompt,
negativePrompt,
model,
currentModelType,
cfgScale: cfg_scale,
scheduler,
steps,
initialImage,
img2imgStrength: strength,
shouldFitToWidthHeight,
width,
height,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
if (!initialImage) {
moduleLog.error('No initial image found in state');
throw new Error('No initial image found in state');
}
const graph: NonNullableGraph = {
nodes: {},
edges: [],
};
// Create the model loader node
const modelLoaderNode: SD1ModelLoaderInvocation | SD2ModelLoaderInvocation = {
id: MODEL_LOADER,
type: currentModelType,
model_name: model,
};
// Create the positive conditioning (prompt) node
const positiveConditioningNode: CompelInvocation = {
id: POSITIVE_CONDITIONING,
type: 'compel',
prompt: positivePrompt,
};
// Negative conditioning
const negativeConditioningNode: CompelInvocation = {
id: NEGATIVE_CONDITIONING,
type: 'compel',
prompt: negativePrompt,
};
// This will encode the raster image to latents - but it may get its `image` from a resize node,
// so we do not set its `image` property yet
const imageToLatentsNode: ImageToLatentsInvocation = {
id: IMAGE_TO_LATENTS,
type: 'i2l',
};
// This does the actual img2img inference
const latentsToLatentsNode: LatentsToLatentsInvocation = {
id: LATENTS_TO_LATENTS,
type: 'l2l',
cfg_scale,
scheduler,
steps,
strength,
};
// Finally we decode the latents back to an image
const latentsToImageNode: LatentsToImageInvocation = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
};
// Add all those nodes to the graph
graph.nodes[MODEL_LOADER] = modelLoaderNode;
graph.nodes[POSITIVE_CONDITIONING] = positiveConditioningNode;
graph.nodes[NEGATIVE_CONDITIONING] = negativeConditioningNode;
graph.nodes[IMAGE_TO_LATENTS] = imageToLatentsNode;
graph.nodes[LATENTS_TO_LATENTS] = latentsToLatentsNode;
graph.nodes[LATENTS_TO_IMAGE] = latentsToImageNode;
// Connect the model loader to the required nodes
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'clip' },
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'clip' },
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'vae' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'vae',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'unet' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'unet',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'vae' },
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
});
// Connect the prompt nodes to the latentsToLatents node
graph.edges.push({
source: { node_id: POSITIVE_CONDITIONING, field: 'conditioning' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'positive_conditioning',
},
});
graph.edges.push({
source: { node_id: NEGATIVE_CONDITIONING, field: 'conditioning' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'negative_conditioning',
},
});
// Connect the image-encoding node
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'latents' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'latents',
},
});
// Connect the image-decoding node
graph.edges.push({
source: { node_id: LATENTS_TO_LATENTS, field: 'latents' },
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
});
/**
* Now we need to handle iterations and random seeds. There are four possible scenarios:
* - Single iteration, explicit seed
* - Single iteration, random seed
* - Multiple iterations, explicit seed
* - Multiple iterations, random seed
*
* They all have different graphs and connections.
*/
// Single iteration, explicit seed
if (!shouldRandomizeSeed && iterations === 1) {
// Noise node using the explicit seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
seed: seed,
};
graph.nodes[NOISE] = noiseNode;
// Connect noise to l2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
});
}
// Single iteration, random seed
if (shouldRandomizeSeed && iterations === 1) {
// Random int node to generate the seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
};
graph.nodes[RANDOM_INT] = randomIntNode;
graph.nodes[NOISE] = noiseNode;
// Connect random int to the seed of the noise node
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to l2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
});
}
// Multiple iterations, explicit seed
if (!shouldRandomizeSeed && iterations > 1) {
// Range of size node to generate `iterations` count of seeds - range of size generates a collection
// of ints from `start` to `start + size`. The `start` is the seed, and the `size` is the number of
// iterations.
const rangeOfSizeNode: RangeOfSizeInvocation = {
id: RANGE_OF_SIZE,
type: 'range_of_size',
start: seed,
size: iterations,
};
// Iterate node to iterate over the seeds generated by the range of size node
const iterateNode: IterateInvocation = {
id: ITERATE,
type: 'iterate',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
};
// Adding to the graph
graph.nodes[RANGE_OF_SIZE] = rangeOfSizeNode;
graph.nodes[ITERATE] = iterateNode;
graph.nodes[NOISE] = noiseNode;
// Connect range of size to iterate
graph.edges.push({
source: { node_id: RANGE_OF_SIZE, field: 'collection' },
destination: {
node_id: ITERATE,
field: 'collection',
},
});
// Connect iterate to noise
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to l2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
});
}
// Multiple iterations, random seed
if (shouldRandomizeSeed && iterations > 1) {
// Random int node to generate the seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
// Range of size node to generate `iterations` count of seeds - range of size generates a collection
const rangeOfSizeNode: RangeOfSizeInvocation = {
id: RANGE_OF_SIZE,
type: 'range_of_size',
size: iterations,
};
// Iterate node to iterate over the seeds generated by the range of size node
const iterateNode: IterateInvocation = {
id: ITERATE,
type: 'iterate',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
width,
height,
};
// Adding to the graph
graph.nodes[RANDOM_INT] = randomIntNode;
graph.nodes[RANGE_OF_SIZE] = rangeOfSizeNode;
graph.nodes[ITERATE] = iterateNode;
graph.nodes[NOISE] = noiseNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
// Connect range of size to iterate
graph.edges.push({
source: { node_id: RANGE_OF_SIZE, field: 'collection' },
destination: {
node_id: ITERATE,
field: 'collection',
},
});
// Connect iterate to noise
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to l2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
});
}
if (
shouldFitToWidthHeight &&
(initialImage.width !== width || initialImage.height !== height)
) {
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
// Create a resize node, explicitly setting its image
const resizeNode: ImageResizeInvocation = {
id: RESIZE,
type: 'img_resize',
image: {
image_name: initialImage.image_name,
},
is_intermediate: true,
height,
width,
};
graph.nodes[RESIZE] = resizeNode;
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
graph.edges.push({
source: { node_id: RESIZE, field: 'image' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
// The `RESIZE` node also passes its width and height to `NOISE`
graph.edges.push({
source: { node_id: RESIZE, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: RESIZE, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
} else {
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
set(graph.nodes[IMAGE_TO_LATENTS], 'image', {
image_name: initialImage.image_name,
});
// Pass the image's dimensions to the `NOISE` node
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
}
addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state);
return graph;
};

View File

@@ -0,0 +1,338 @@
import { RootState } from 'app/store/store';
import {
ImageResizeInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
} from 'services/api';
import { NonNullableGraph } from 'features/nodes/types/types';
import { log } from 'app/logging/useLogger';
import {
ITERATE,
LATENTS_TO_IMAGE,
MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
IMAGE_TO_IMAGE_GRAPH,
IMAGE_TO_LATENTS,
LATENTS_TO_LATENTS,
RESIZE,
} from './constants';
import { set } from 'lodash-es';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
const moduleLog = log.child({ namespace: 'nodes' });
/**
* Builds the Image to Image tab graph.
*/
export const buildLinearImageToImageGraph = (
state: RootState
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: model_name,
cfgScale: cfg_scale,
scheduler,
steps,
initialImage,
img2imgStrength: strength,
shouldFitToWidthHeight,
width,
height,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only things we need extra logic for are the randomized seed, control net, and, for img2img,
* the `fit` param. These are added to the graph at the end.
*/
if (!initialImage) {
moduleLog.error('No initial image found in state');
throw new Error('No initial image found in state');
}
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: IMAGE_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
// seed - must be connected manually
// start: 0,
size: iterations,
step: 1,
},
[NOISE]: {
type: 'noise',
id: NOISE,
},
[MODEL_LOADER]: {
type: 'sd1_model_loader',
id: MODEL_LOADER,
model_name,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
},
[ITERATE]: {
type: 'iterate',
id: ITERATE,
},
[LATENTS_TO_LATENTS]: {
type: 'l2l',
id: LATENTS_TO_LATENTS,
cfg_scale,
scheduler,
steps,
strength,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
id: IMAGE_TO_LATENTS,
// must be set manually later, because the `fit` parameter may require a resize node to be inserted
// image: {
// image_name: initialImage.image_name,
// },
},
},
edges: [
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
},
{
source: {
node_id: RANGE_OF_SIZE,
field: 'collection',
},
destination: {
node_id: ITERATE,
field: 'collection',
},
},
{
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
},
{
source: {
node_id: LATENTS_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: IMAGE_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'latents',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'noise',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'vae',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: LATENTS_TO_LATENTS,
field: 'positive_conditioning',
},
},
],
};
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
graph.nodes[RANDOM_INT] = randomIntNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// handle `fit`
if (
shouldFitToWidthHeight &&
(initialImage.width !== width || initialImage.height !== height)
) {
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
// Create a resize node, explicitly setting its image
const resizeNode: ImageResizeInvocation = {
id: RESIZE,
type: 'img_resize',
image: {
image_name: initialImage.image_name,
},
is_intermediate: true,
width,
height,
};
graph.nodes[RESIZE] = resizeNode;
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
graph.edges.push({
source: { node_id: RESIZE, field: 'image' },
destination: {
node_id: IMAGE_TO_LATENTS,
field: 'image',
},
});
// The `RESIZE` node also passes its width and height to `NOISE`
graph.edges.push({
source: { node_id: RESIZE, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: RESIZE, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
} else {
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
set(graph.nodes[IMAGE_TO_LATENTS], 'image', {
image_name: initialImage.image_name,
});
// Pass the image's dimensions to the `NOISE` node
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
destination: {
node_id: NOISE,
field: 'width',
},
});
graph.edges.push({
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
destination: {
node_id: NOISE,
field: 'height',
},
});
}
// add controlnet
addControlNetToLinearGraph(graph, LATENTS_TO_LATENTS, state);
return graph;
};

View File

@@ -0,0 +1,226 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { RandomIntInvocation, RangeOfSizeInvocation } from 'services/api';
import {
ITERATE,
LATENTS_TO_IMAGE,
MODEL_LOADER,
NEGATIVE_CONDITIONING,
NOISE,
POSITIVE_CONDITIONING,
RANDOM_INT,
RANGE_OF_SIZE,
TEXT_TO_IMAGE_GRAPH,
TEXT_TO_LATENTS,
} from './constants';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
type TextToImageGraphOverrides = {
width: number;
height: number;
};
export const buildLinearTextToImageGraph = (
state: RootState,
overrides?: TextToImageGraphOverrides
): NonNullableGraph => {
const {
positivePrompt,
negativePrompt,
model: model_name,
cfgScale: cfg_scale,
scheduler,
steps,
width,
height,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
* ids.
*
* The only things we need extra logic for are the randomized seed, control net, and, for img2img,
* the `fit` param. These are added to the graph at the end.
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
const graph: NonNullableGraph = {
id: TEXT_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[RANGE_OF_SIZE]: {
type: 'range_of_size',
id: RANGE_OF_SIZE,
// start: 0, // seed - must be connected manually
size: iterations,
step: 1,
},
[NOISE]: {
type: 'noise',
id: NOISE,
width: overrides?.width || width,
height: overrides?.height || height,
},
[TEXT_TO_LATENTS]: {
type: 't2l',
id: TEXT_TO_LATENTS,
cfg_scale,
scheduler,
steps,
},
[MODEL_LOADER]: {
type: 'sd1_model_loader',
id: MODEL_LOADER,
model_name,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
id: LATENTS_TO_IMAGE,
},
[ITERATE]: {
type: 'iterate',
id: ITERATE,
},
},
edges: [
{
source: {
node_id: NEGATIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'negative_conditioning',
},
},
{
source: {
node_id: POSITIVE_CONDITIONING,
field: 'conditioning',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'positive_conditioning',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'unet',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'unet',
},
},
{
source: {
node_id: TEXT_TO_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
},
{
source: {
node_id: MODEL_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
},
{
source: {
node_id: RANGE_OF_SIZE,
field: 'collection',
},
destination: {
node_id: ITERATE,
field: 'collection',
},
},
{
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
},
{
source: {
node_id: NOISE,
field: 'noise',
},
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
},
],
};
// handle seed
if (shouldRandomizeSeed) {
// Random int node to generate the starting seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
graph.nodes[RANDOM_INT] = randomIntNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
} else {
// User specified seed, so set the start of the range of size to the seed
(graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed;
}
// add controlnet
addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state);
return graph;
};

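`buildLinearTextToImageGraph` is the only builder here that takes an overrides argument; when present, it pins the NOISE node's dimensions instead of using the width/height from generation state. A usage sketch (call sites and values illustrative):

import { buildLinearTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearTextToImageGraph';

// Default: NOISE takes width/height from state.generation
const graph = buildLinearTextToImageGraph(state);

// Overridden: NOISE is forced to the given dimensions, e.g. a bounding
// box supplied by another caller (values illustrative)
const boundedGraph = buildLinearTextToImageGraph(state, {
  width: 512,
  height: 512,
});
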
View File

@@ -1,357 +0,0 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import {
CompelInvocation,
Graph,
IterateInvocation,
LatentsToImageInvocation,
NoiseInvocation,
RandomIntInvocation,
RangeOfSizeInvocation,
SD1ModelLoaderInvocation,
SD2ModelLoaderInvocation,
TextToLatentsInvocation,
} from 'services/api';
import { addControlNetToLinearGraph } from '../addControlNetToLinearGraph';
const MODEL_LOADER = 'model_loader';
const POSITIVE_CONDITIONING = 'positive_conditioning';
const NEGATIVE_CONDITIONING = 'negative_conditioning';
const TEXT_TO_LATENTS = 'text_to_latents';
const LATENTS_TO_IMAGE = 'latents_to_image';
const NOISE = 'noise';
const RANDOM_INT = 'rand_int';
const RANGE_OF_SIZE = 'range_of_size';
const ITERATE = 'iterate';
/**
* Builds the Text to Image tab graph.
*/
export const buildTextToImageGraph = (state: RootState): Graph => {
const {
model,
currentModelType,
positivePrompt,
negativePrompt,
cfgScale: cfg_scale,
scheduler,
steps,
width,
height,
iterations,
seed,
shouldRandomizeSeed,
} = state.generation;
const graph: NonNullableGraph = {
nodes: {},
edges: [],
};
// Create the model loader node
const modelLoaderNode: SD1ModelLoaderInvocation | SD2ModelLoaderInvocation = {
id: MODEL_LOADER,
type: currentModelType,
model_name: model,
};
// Create the conditioning, t2l and l2i nodes
const positiveConditioningNode: CompelInvocation = {
id: POSITIVE_CONDITIONING,
type: 'compel',
prompt: positivePrompt,
};
const negativeConditioningNode: CompelInvocation = {
id: NEGATIVE_CONDITIONING,
type: 'compel',
prompt: negativePrompt,
};
const textToLatentsNode: TextToLatentsInvocation = {
id: TEXT_TO_LATENTS,
type: 't2l',
cfg_scale,
scheduler,
steps,
};
const latentsToImageNode: LatentsToImageInvocation = {
id: LATENTS_TO_IMAGE,
type: 'l2i',
};
// Add to the graph
graph.nodes[MODEL_LOADER] = modelLoaderNode;
graph.nodes[POSITIVE_CONDITIONING] = positiveConditioningNode;
graph.nodes[NEGATIVE_CONDITIONING] = negativeConditioningNode;
graph.nodes[TEXT_TO_LATENTS] = textToLatentsNode;
graph.nodes[LATENTS_TO_IMAGE] = latentsToImageNode;
// Connect the model loader to the required nodes
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'clip' },
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'clip' },
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'unet' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'unet',
},
});
graph.edges.push({
source: { node_id: MODEL_LOADER, field: 'vae' },
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'vae',
},
});
// Connect the conditioning nodes to t2l, and t2l to l2i
graph.edges.push({
source: { node_id: POSITIVE_CONDITIONING, field: 'conditioning' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'positive_conditioning',
},
});
graph.edges.push({
source: { node_id: NEGATIVE_CONDITIONING, field: 'conditioning' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'negative_conditioning',
},
});
graph.edges.push({
source: { node_id: TEXT_TO_LATENTS, field: 'latents' },
destination: {
node_id: LATENTS_TO_IMAGE,
field: 'latents',
},
});
/**
* Now we need to handle iterations and random seeds. There are four possible scenarios:
* - Single iteration, explicit seed
* - Single iteration, random seed
* - Multiple iterations, explicit seed
* - Multiple iterations, random seed
*
* They all have different graphs and connections.
*/
// Single iteration, explicit seed
if (!shouldRandomizeSeed && iterations === 1) {
// Noise node using the explicit seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
seed: seed,
width,
height,
};
graph.nodes[NOISE] = noiseNode;
// Connect noise to t2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
});
}
// Single iteration, random seed
if (shouldRandomizeSeed && iterations === 1) {
// Random int node to generate the seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
width,
height,
};
graph.nodes[RANDOM_INT] = randomIntNode;
graph.nodes[NOISE] = noiseNode;
// Connect random int to the seed of the noise node
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to t2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
});
}
// Multiple iterations, explicit seed
if (!shouldRandomizeSeed && iterations > 1) {
// Range of size node to generate `iterations` count of seeds - range of size generates a collection
// of ints from `start` to `start + size`. The `start` is the seed, and the `size` is the number of
// iterations.
const rangeOfSizeNode: RangeOfSizeInvocation = {
id: RANGE_OF_SIZE,
type: 'range_of_size',
start: seed,
size: iterations,
};
// Iterate node to iterate over the seeds generated by the range of size node
const iterateNode: IterateInvocation = {
id: ITERATE,
type: 'iterate',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
width,
height,
};
// Adding to the graph
graph.nodes[RANGE_OF_SIZE] = rangeOfSizeNode;
graph.nodes[ITERATE] = iterateNode;
graph.nodes[NOISE] = noiseNode;
// Connect range of size to iterate
graph.edges.push({
source: { node_id: RANGE_OF_SIZE, field: 'collection' },
destination: {
node_id: ITERATE,
field: 'collection',
},
});
// Connect iterate to noise
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to t2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
});
}
// Multiple iterations, random seed
if (shouldRandomizeSeed && iterations > 1) {
// Random int node to generate the seed
const randomIntNode: RandomIntInvocation = {
id: RANDOM_INT,
type: 'rand_int',
};
// Range of size node to generate `iterations` count of seeds - range of size generates a collection
const rangeOfSizeNode: RangeOfSizeInvocation = {
id: RANGE_OF_SIZE,
type: 'range_of_size',
size: iterations,
};
// Iterate node to iterate over the seeds generated by the range of size node
const iterateNode: IterateInvocation = {
id: ITERATE,
type: 'iterate',
};
// Noise node without any seed
const noiseNode: NoiseInvocation = {
id: NOISE,
type: 'noise',
width,
height,
};
// Adding to the graph
graph.nodes[RANDOM_INT] = randomIntNode;
graph.nodes[RANGE_OF_SIZE] = rangeOfSizeNode;
graph.nodes[ITERATE] = iterateNode;
graph.nodes[NOISE] = noiseNode;
// Connect random int to the start of the range of size so the range starts on the random first seed
graph.edges.push({
source: { node_id: RANDOM_INT, field: 'a' },
destination: { node_id: RANGE_OF_SIZE, field: 'start' },
});
// Connect range of size to iterate
graph.edges.push({
source: { node_id: RANGE_OF_SIZE, field: 'collection' },
destination: {
node_id: ITERATE,
field: 'collection',
},
});
// Connect iterate to noise
graph.edges.push({
source: {
node_id: ITERATE,
field: 'item',
},
destination: {
node_id: NOISE,
field: 'seed',
},
});
// Connect noise to t2l
graph.edges.push({
source: { node_id: NOISE, field: 'noise' },
destination: {
node_id: TEXT_TO_LATENTS,
field: 'noise',
},
});
}
addControlNetToLinearGraph(graph, TEXT_TO_LATENTS, state);
return graph;
};

View File

@@ -0,0 +1,20 @@
// friendly node ids
export const POSITIVE_CONDITIONING = 'positive_conditioning';
export const NEGATIVE_CONDITIONING = 'negative_conditioning';
export const TEXT_TO_LATENTS = 'text_to_latents';
export const LATENTS_TO_IMAGE = 'latents_to_image';
export const NOISE = 'noise';
export const RANDOM_INT = 'rand_int';
export const RANGE_OF_SIZE = 'range_of_size';
export const ITERATE = 'iterate';
export const MODEL_LOADER = 'model_loader';
export const IMAGE_TO_LATENTS = 'image_to_latents';
export const LATENTS_TO_LATENTS = 'latents_to_latents';
export const RESIZE = 'resize_image';
export const INPAINT = 'inpaint';
export const CONTROL_NET_COLLECT = 'control_net_collect';
// friendly graph ids
export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph';
export const IMAGE_TO_IMAGE_GRAPH = 'image_to_image_graph';
export const INPAINT_GRAPH = 'inpaint_graph';

View File

@@ -1,5 +1,4 @@
import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
import ParamSeedCollapse from 'features/parameters/components/Parameters/Seed/ParamSeedCollapse';
import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse';
import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse';
import ParamInfillAndScalingCollapse from 'features/parameters/components/Parameters/Canvas/InfillAndScaling/ParamInfillAndScalingCollapse';
@@ -8,6 +7,7 @@ import UnifiedCanvasCoreParameters from './UnifiedCanvasCoreParameters';
import { memo } from 'react';
import ParamPositiveConditioning from 'features/parameters/components/Parameters/Core/ParamPositiveConditioning';
import ParamNegativeConditioning from 'features/parameters/components/Parameters/Core/ParamNegativeConditioning';
import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse';
const UnifiedCanvasParameters = () => {
return (
@@ -16,6 +16,7 @@ const UnifiedCanvasParameters = () => {
<ParamNegativeConditioning />
<ProcessButtons />
<UnifiedCanvasCoreParameters />
<ParamControlNetCollapse />
<ParamVariationCollapse />
<ParamSymmetryCollapse />
<ParamSeamCorrectionCollapse />

View File

@@ -21,7 +21,6 @@ export type { ConditioningField } from './models/ConditioningField';
export type { ContentShuffleImageProcessorInvocation } from './models/ContentShuffleImageProcessorInvocation';
export type { ControlField } from './models/ControlField';
export type { ControlNetInvocation } from './models/ControlNetInvocation';
export type { ControlNetModelConfig } from './models/ControlNetModelConfig';
export type { ControlOutput } from './models/ControlOutput';
export type { CreateModelRequest } from './models/CreateModelRequest';
export type { CvInpaintInvocation } from './models/CvInpaintInvocation';
@ -56,7 +55,6 @@ export type { ImageProcessorInvocation } from './models/ImageProcessorInvocation
export type { ImageRecordChanges } from './models/ImageRecordChanges';
export type { ImageResizeInvocation } from './models/ImageResizeInvocation';
export type { ImageScaleInvocation } from './models/ImageScaleInvocation';
export type { ImageToImageInvocation } from './models/ImageToImageInvocation';
export type { ImageToLatentsInvocation } from './models/ImageToLatentsInvocation';
export type { ImageUrlsDTO } from './models/ImageUrlsDTO';
export type { InfillColorInvocation } from './models/InfillColorInvocation';
@ -65,6 +63,14 @@ export type { InfillTileInvocation } from './models/InfillTileInvocation';
export type { InpaintInvocation } from './models/InpaintInvocation';
export type { IntCollectionOutput } from './models/IntCollectionOutput';
export type { IntOutput } from './models/IntOutput';
export type { invokeai__backend__model_management__models__controlnet__ControlNetModel__Config } from './models/invokeai__backend__model_management__models__controlnet__ControlNetModel__Config';
export type { invokeai__backend__model_management__models__lora__LoRAModel__Config } from './models/invokeai__backend__model_management__models__lora__LoRAModel__Config';
export type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig } from './models/invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig';
export type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig } from './models/invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig';
export type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig } from './models/invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig';
export type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig } from './models/invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig';
export type { invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config } from './models/invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config';
export type { invokeai__backend__model_management__models__vae__VaeModel__Config } from './models/invokeai__backend__model_management__models__vae__VaeModel__Config';
export type { IterateInvocation } from './models/IterateInvocation';
export type { IterateInvocationOutput } from './models/IterateInvocationOutput';
export type { LatentsField } from './models/LatentsField';
@ -77,7 +83,6 @@ export type { LoadImageInvocation } from './models/LoadImageInvocation';
export type { LoraInfo } from './models/LoraInfo';
export type { LoraLoaderInvocation } from './models/LoraLoaderInvocation';
export type { LoraLoaderOutput } from './models/LoraLoaderOutput';
export type { LoRAModelConfig } from './models/LoRAModelConfig';
export type { MaskFromAlphaInvocation } from './models/MaskFromAlphaInvocation';
export type { MaskOutput } from './models/MaskOutput';
export type { MediapipeFaceProcessorInvocation } from './models/MediapipeFaceProcessorInvocation';
@ -113,20 +118,13 @@ export type { SchedulerPredictionType } from './models/SchedulerPredictionType';
export type { SD1ModelLoaderInvocation } from './models/SD1ModelLoaderInvocation';
export type { SD2ModelLoaderInvocation } from './models/SD2ModelLoaderInvocation';
export type { ShowImageInvocation } from './models/ShowImageInvocation';
export type { StableDiffusion1ModelCheckpointConfig } from './models/StableDiffusion1ModelCheckpointConfig';
export type { StableDiffusion1ModelDiffusersConfig } from './models/StableDiffusion1ModelDiffusersConfig';
export type { StableDiffusion2ModelCheckpointConfig } from './models/StableDiffusion2ModelCheckpointConfig';
export type { StableDiffusion2ModelDiffusersConfig } from './models/StableDiffusion2ModelDiffusersConfig';
export type { StepParamEasingInvocation } from './models/StepParamEasingInvocation';
export type { SubModelType } from './models/SubModelType';
export type { SubtractInvocation } from './models/SubtractInvocation';
export type { TextToImageInvocation } from './models/TextToImageInvocation';
export type { TextToLatentsInvocation } from './models/TextToLatentsInvocation';
export type { TextualInversionModelConfig } from './models/TextualInversionModelConfig';
export type { UNetField } from './models/UNetField';
export type { UpscaleInvocation } from './models/UpscaleInvocation';
export type { VaeField } from './models/VaeField';
export type { VaeModelConfig } from './models/VaeModelConfig';
export type { VaeRepo } from './models/VaeRepo';
export type { ValidationError } from './models/ValidationError';
export type { ZoeDepthImageProcessorInvocation } from './models/ZoeDepthImageProcessorInvocation';
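
The fully-qualified names exported above mirror the backend's Python module paths and are unwieldy at call sites. A local alias keeps application code readable — a sketch; the short alias name is ours, not part of the generated client.

import type { invokeai__backend__model_management__models__lora__LoRAModel__Config } from 'services/api';

// Re-introduce a short, app-side name without touching the generated code
type LoRAModelConfig =
  invokeai__backend__model_management__models__lora__LoRAModel__Config;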

View File

@ -19,3 +19,4 @@ export type ClipField = {
*/
loras: Array<LoraInfo>;
};

View File

@ -26,7 +26,6 @@ import type { ImagePasteInvocation } from './ImagePasteInvocation';
import type { ImageProcessorInvocation } from './ImageProcessorInvocation';
import type { ImageResizeInvocation } from './ImageResizeInvocation';
import type { ImageScaleInvocation } from './ImageScaleInvocation';
import type { ImageToImageInvocation } from './ImageToImageInvocation';
import type { ImageToLatentsInvocation } from './ImageToLatentsInvocation';
import type { InfillColorInvocation } from './InfillColorInvocation';
import type { InfillPatchMatchInvocation } from './InfillPatchMatchInvocation';
@ -62,7 +61,6 @@ import type { SD2ModelLoaderInvocation } from './SD2ModelLoaderInvocation';
import type { ShowImageInvocation } from './ShowImageInvocation';
import type { StepParamEasingInvocation } from './StepParamEasingInvocation';
import type { SubtractInvocation } from './SubtractInvocation';
import type { TextToImageInvocation } from './TextToImageInvocation';
import type { TextToLatentsInvocation } from './TextToLatentsInvocation';
import type { UpscaleInvocation } from './UpscaleInvocation';
import type { ZoeDepthImageProcessorInvocation } from './ZoeDepthImageProcessorInvocation';
@ -75,9 +73,10 @@ export type Graph = {
/**
* The nodes in this graph
*/
nodes?: Record<string, (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | CompelInvocation | LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CvInpaintInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | DynamicPromptInvocation | RestoreFaceInvocation | UpscaleInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | ImageToImageInvocation | LatentsToLatentsInvocation | InpaintInvocation)>;
nodes?: Record<string, (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | DynamicPromptInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | UpscaleInvocation | RestoreFaceInvocation | InpaintInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation)>;
/**
* The connections between nodes and their fields in this graph
*/
edges?: Array<Edge>;
};

View File

@ -48,7 +48,7 @@ export type GraphExecutionState = {
/**
* The results of node executions
*/
results: Record<string, (IntCollectionOutput | FloatCollectionOutput | ModelLoaderOutput | LoraLoaderOutput | CompelOutput | ImageOutput | MaskOutput | ControlOutput | LatentsOutput | NoiseOutput | IntOutput | FloatOutput | PromptOutput | PromptCollectionOutput | GraphInvocationOutput | IterateInvocationOutput | CollectInvocationOutput)>;
results: Record<string, (ImageOutput | MaskOutput | ControlOutput | ModelLoaderOutput | LoraLoaderOutput | PromptOutput | PromptCollectionOutput | CompelOutput | IntOutput | FloatOutput | LatentsOutput | NoiseOutput | IntCollectionOutput | FloatCollectionOutput | GraphInvocationOutput | IterateInvocationOutput | CollectInvocationOutput)>;
/**
* Errors raised when executing nodes
*/
@ -62,3 +62,4 @@ export type GraphExecutionState = {
*/
source_prepared_mapping: Record<string, Array<string>>;
};
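
Consuming `results` typically means narrowing the output union per entry. A sketch, assuming the generated output types carry a string `type` discriminant (e.g. `'image_output'`) the way the invocation types do:

import type { GraphExecutionState, ImageOutput } from 'services/api';

// Collect only the image outputs from a finished session
const collectImageOutputs = (session: GraphExecutionState): ImageOutput[] =>
  Object.values(session.results).filter(
    (result): result is ImageOutput => result.type === 'image_output'
  );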

View File

@ -1,76 +0,0 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Generates an image using img2img.
*/
export type ImageToImageInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img2img';
/**
* The prompt to generate an image from
*/
prompt?: string;
/**
* The seed to use (omit for random)
*/
seed?: number;
/**
* The number of steps to use to generate the image
*/
steps?: number;
/**
* The width of the resulting image
*/
width?: number;
/**
* The height of the resulting image
*/
height?: number;
/**
* The Classifier-Free Guidance scale; higher values tend to produce images closer to the prompt
*/
cfg_scale?: number;
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'heun_k' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/
model?: string;
/**
* Whether or not to produce progress images during generation
*/
progress_images?: boolean;
/**
* The control model to use
*/
control_model?: string;
/**
* The processed control image
*/
control_image?: ImageField;
/**
* The input image
*/
image?: ImageField;
/**
* The strength of the original image
*/
strength?: number;
/**
* Whether or not the result should be fit to the aspect ratio of the input image
*/
fit?: boolean;
};
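
With the monolithic img2img node removed, the equivalent flow is the latents pipeline that remains in the unions above: encode the init image, denoise, decode. A sketch, assuming the usual short type tags ('i2l', 'l2l', 'l2i') and illustrative parameter values:

const img2imgNodes = {
  // Encode the init image into latents
  image_to_latents: { id: 'image_to_latents', type: 'i2l' },
  // Denoise starting from the encoded latents; strength plays the old img2img role
  latents_to_latents: { id: 'latents_to_latents', type: 'l2l', strength: 0.75 },
  // Decode the final latents back into an image
  latents_to_image: { id: 'latents_to_image', type: 'l2i' },
};

const img2imgEdges = [
  { source: { node_id: 'image_to_latents', field: 'latents' }, destination: { node_id: 'latents_to_latents', field: 'latents' } },
  { source: { node_id: 'latents_to_latents', field: 'latents' }, destination: { node_id: 'latents_to_image', field: 'latents' } },
];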

View File

@ -3,7 +3,10 @@
/* eslint-disable */
import type { ColorField } from './ColorField';
import type { ConditioningField } from './ConditioningField';
import type { ImageField } from './ImageField';
import type { UNetField } from './UNetField';
import type { VaeField } from './VaeField';
/**
* Generates an image using inpaint.
@ -19,9 +22,13 @@ export type InpaintInvocation = {
is_intermediate?: boolean;
type?: 'inpaint';
/**
* The prompt to generate an image from
* Positive conditioning for generation
*/
prompt?: string;
positive_conditioning?: ConditioningField;
/**
* Negative conditioning for generation
*/
negative_conditioning?: ConditioningField;
/**
* The seed to use (omit for random)
*/
@ -47,21 +54,13 @@ export type InpaintInvocation = {
*/
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'lms_k' | 'pndm' | 'heun' | 'heun_k' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2s_k' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'dpmpp_2m_sde' | 'dpmpp_2m_sde_k' | 'dpmpp_sde' | 'dpmpp_sde_k' | 'unipc';
/**
* The model to use (currently ignored)
* UNet model
*/
model?: string;
unet?: UNetField;
/**
* Whether or not to produce progress images during generation
* Vae model
*/
progress_images?: boolean;
/**
* The control model to use
*/
control_model?: string;
/**
* The processed control image
*/
control_image?: ImageField;
vae?: VaeField;
/**
* The input image
*/
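
In practice the new fields are populated by edges rather than literals: conditioning comes from the compel nodes and unet/vae from the model loader. A sketch using the friendly ids added earlier in this commit:

const inpaintEdges = [
  { source: { node_id: 'positive_conditioning', field: 'conditioning' }, destination: { node_id: 'inpaint', field: 'positive_conditioning' } },
  { source: { node_id: 'negative_conditioning', field: 'conditioning' }, destination: { node_id: 'inpaint', field: 'negative_conditioning' } },
  { source: { node_id: 'model_loader', field: 'unet' }, destination: { node_id: 'inpaint', field: 'unet' } },
  { source: { node_id: 'model_loader', field: 'vae' }, destination: { node_id: 'inpaint', field: 'vae' } },
];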

View File

@ -28,3 +28,4 @@ export type LoraInfo = {
*/
weight: number;
};

View File

@ -35,3 +35,4 @@ export type LoraLoaderInvocation = {
*/
clip?: ClipField;
};

View File

@ -19,3 +19,4 @@ export type LoraLoaderOutput = {
*/
clip?: ClipField;
};

View File

@ -24,3 +24,4 @@ export type ModelInfo = {
*/
submodel?: SubModelType;
};

View File

@ -24,3 +24,4 @@ export type ModelLoaderOutput = {
*/
vae?: VaeField;
};

View File

@ -2,15 +2,16 @@
/* tslint:disable */
/* eslint-disable */
import type { ControlNetModelConfig } from './ControlNetModelConfig';
import type { LoRAModelConfig } from './LoRAModelConfig';
import type { StableDiffusion1ModelCheckpointConfig } from './StableDiffusion1ModelCheckpointConfig';
import type { StableDiffusion1ModelDiffusersConfig } from './StableDiffusion1ModelDiffusersConfig';
import type { StableDiffusion2ModelCheckpointConfig } from './StableDiffusion2ModelCheckpointConfig';
import type { StableDiffusion2ModelDiffusersConfig } from './StableDiffusion2ModelDiffusersConfig';
import type { TextualInversionModelConfig } from './TextualInversionModelConfig';
import type { VaeModelConfig } from './VaeModelConfig';
import type { invokeai__backend__model_management__models__controlnet__ControlNetModel__Config } from './invokeai__backend__model_management__models__controlnet__ControlNetModel__Config';
import type { invokeai__backend__model_management__models__lora__LoRAModel__Config } from './invokeai__backend__model_management__models__lora__LoRAModel__Config';
import type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig } from './invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig';
import type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig } from './invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig';
import type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig } from './invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig';
import type { invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig } from './invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig';
import type { invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config } from './invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config';
import type { invokeai__backend__model_management__models__vae__VaeModel__Config } from './invokeai__backend__model_management__models__vae__VaeModel__Config';
export type ModelsList = {
models: Record<string, Record<string, Record<string, (TextualInversionModelConfig | StableDiffusion2ModelDiffusersConfig | ControlNetModelConfig | StableDiffusion2ModelCheckpointConfig | StableDiffusion1ModelCheckpointConfig | VaeModelConfig | StableDiffusion1ModelDiffusersConfig | LoRAModelConfig)>>>;
models: Record<string, Record<string, Record<string, (invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig | invokeai__backend__model_management__models__controlnet__ControlNetModel__Config | invokeai__backend__model_management__models__lora__LoRAModel__Config | invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig | invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config | invokeai__backend__model_management__models__vae__VaeModel__Config | invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig | invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig)>>>;
};
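
The triple-nested `Record` reads as base model → model type → model name. Flattening it for display is straightforward — a sketch (the key meanings are inferred from the nesting, not documented here):

import type { ModelsList } from 'services/api';

const flattenModels = (list: ModelsList) =>
  Object.entries(list.models).flatMap(([baseModel, byType]) =>
    Object.entries(byType).flatMap(([modelType, byName]) =>
      Object.entries(byName).map(([name, config]) => ({
        baseModel,
        modelType,
        name,
        config,
      }))
    )
  );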

View File

@ -20,3 +20,4 @@ export type SD1ModelLoaderInvocation = {
*/
model_name?: string;
};

View File

@ -20,3 +20,4 @@ export type SD2ModelLoaderInvocation = {
*/
model_name?: string;
};

View File

@ -1,64 +0,0 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Generates an image using text2img.
*/
export type TextToImageInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'txt2img';
/**
* The prompt to generate an image from
*/
prompt?: string;
/**
* The seed to use (omit for random)
*/
seed?: number;
/**
* The number of steps to use to generate the image
*/
steps?: number;
/**
* The width of the resulting image
*/
width?: number;
/**
* The height of the resulting image
*/
height?: number;
/**
* The Classifier-Free Guidance scale; higher values tend to produce images closer to the prompt
*/
cfg_scale?: number;
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'heun_k' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/
model?: string;
/**
* Whether or not to produce progress images during generation
*/
progress_images?: boolean;
/**
* The control model to use
*/
control_model?: string;
/**
* The processed control image
*/
control_image?: ImageField;
};

View File

@ -19,3 +19,4 @@ export type UNetField = {
*/
loras: Array<LoraInfo>;
};

View File

@ -10,3 +10,4 @@ export type VaeField = {
*/
vae: ModelInfo;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
export type invokeai__backend__model_management__models__controlnet__ControlNetModel__Config = {
path: string;
description?: string;
format: ('checkpoint' | 'diffusers');
default?: boolean;
error?: ModelError;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
export type invokeai__backend__model_management__models__lora__LoRAModel__Config = {
path: string;
description?: string;
format: ('lycoris' | 'diffusers');
default?: boolean;
error?: ModelError;
};

View File

@ -0,0 +1,18 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
import type { ModelVariantType } from './ModelVariantType';
export type invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__CheckpointConfig = {
path: string;
description?: string;
format: 'checkpoint';
default?: boolean;
error?: ModelError;
vae?: string;
config?: string;
variant: ModelVariantType;
};

View File

@ -0,0 +1,17 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
import type { ModelVariantType } from './ModelVariantType';
export type invokeai__backend__model_management__models__stable_diffusion__StableDiffusion1Model__DiffusersConfig = {
path: string;
description?: string;
format: 'diffusers';
default?: boolean;
error?: ModelError;
vae?: string;
variant: ModelVariantType;
};

View File

@ -0,0 +1,21 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
import type { ModelVariantType } from './ModelVariantType';
import type { SchedulerPredictionType } from './SchedulerPredictionType';
export type invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__CheckpointConfig = {
path: string;
description?: string;
format: 'checkpoint';
default?: boolean;
error?: ModelError;
vae?: string;
config?: string;
variant: ModelVariantType;
prediction_type: SchedulerPredictionType;
upcast_attention: boolean;
};

View File

@ -0,0 +1,20 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
import type { ModelVariantType } from './ModelVariantType';
import type { SchedulerPredictionType } from './SchedulerPredictionType';
export type invokeai__backend__model_management__models__stable_diffusion__StableDiffusion2Model__DiffusersConfig = {
path: string;
description?: string;
format: 'diffusers';
default?: boolean;
error?: ModelError;
vae?: string;
variant: ModelVariantType;
prediction_type: SchedulerPredictionType;
upcast_attention: boolean;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
export type invokeai__backend__model_management__models__textual_inversion__TextualInversionModel__Config = {
path: string;
description?: string;
format: null;
default?: boolean;
error?: ModelError;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ModelError } from './ModelError';
export type invokeai__backend__model_management__models__vae__VaeModel__Config = {
path: string;
description?: string;
format: ('checkpoint' | 'diffusers');
default?: boolean;
error?: ModelError;
};
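
All of these generated config variants share `path`, `description`, `format`, `default`, and `error`, so `format` doubles as a cheap discriminant. A sketch of a guard over that shared shape (the `AnyModelConfig` name is ours, not generated):

type AnyModelConfig = {
  path: string;
  description?: string;
  format: string | null; // null for textual inversion, a string elsewhere
  default?: boolean;
};

const isDiffusersFormat = (config: AnyModelConfig): boolean =>
  config.format === 'diffusers';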

View File

@ -19,18 +19,18 @@ export class ModelsService {
* @throws ApiError
*/
public static listModels({
baseModel,
modelType,
}: {
/**
* Base model
*/
baseModel?: BaseModelType,
/**
* The type of model to get
*/
modelType?: ModelType,
}): CancelablePromise<ModelsList> {
baseModel,
modelType,
}: {
/**
* Base model
*/
baseModel?: BaseModelType,
/**
* The type of model to get
*/
modelType?: ModelType,
}): CancelablePromise<ModelsList> {
return __request(OpenAPI, {
method: 'GET',
url: '/api/v1/models/',
@ -51,10 +51,10 @@ modelType?: ModelType,
* @throws ApiError
*/
public static updateModel({
requestBody,
}: {
requestBody: CreateModelRequest,
}): CancelablePromise<any> {
requestBody,
}: {
requestBody: CreateModelRequest,
}): CancelablePromise<any> {
return __request(OpenAPI, {
method: 'POST',
url: '/api/v1/models/',
@ -73,10 +73,10 @@ requestBody: CreateModelRequest,
* @throws ApiError
*/
public static delModel({
modelName,
}: {
modelName: string,
}): CancelablePromise<any> {
modelName,
}: {
modelName: string,
}): CancelablePromise<any> {
return __request(OpenAPI, {
method: 'DELETE',
url: '/api/v1/models/{model_name}',
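
The hunks above are re-indentation only; the call shape is unchanged, with arguments passed as a single destructured object. A usage sketch (the import path and the enum values are assumptions):

import { ModelsService } from 'services/api';

const fetchModels = async () =>
  ModelsService.listModels({
    baseModel: 'sd-1', // hypothetical BaseModelType value
    modelType: 'main', // hypothetical ModelType value
  });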

View File

@ -27,7 +27,6 @@ import type { ImagePasteInvocation } from '../models/ImagePasteInvocation';
import type { ImageProcessorInvocation } from '../models/ImageProcessorInvocation';
import type { ImageResizeInvocation } from '../models/ImageResizeInvocation';
import type { ImageScaleInvocation } from '../models/ImageScaleInvocation';
import type { ImageToImageInvocation } from '../models/ImageToImageInvocation';
import type { ImageToLatentsInvocation } from '../models/ImageToLatentsInvocation';
import type { InfillColorInvocation } from '../models/InfillColorInvocation';
import type { InfillPatchMatchInvocation } from '../models/InfillPatchMatchInvocation';
@ -64,7 +63,6 @@ import type { SD2ModelLoaderInvocation } from '../models/SD2ModelLoaderInvocatio
import type { ShowImageInvocation } from '../models/ShowImageInvocation';
import type { StepParamEasingInvocation } from '../models/StepParamEasingInvocation';
import type { SubtractInvocation } from '../models/SubtractInvocation';
import type { TextToImageInvocation } from '../models/TextToImageInvocation';
import type { TextToLatentsInvocation } from '../models/TextToLatentsInvocation';
import type { UpscaleInvocation } from '../models/UpscaleInvocation';
import type { ZoeDepthImageProcessorInvocation } from '../models/ZoeDepthImageProcessorInvocation';
@ -82,23 +80,23 @@ export class SessionsService {
* @throws ApiError
*/
public static listSessions({
page,
perPage = 10,
query = '',
}: {
/**
* The page of results to get
*/
page?: number,
/**
* The number of results per page
*/
perPage?: number,
/**
* The query string to search for
*/
query?: string,
}): CancelablePromise<PaginatedResults_GraphExecutionState_> {
page,
perPage = 10,
query = '',
}: {
/**
* The page of results to get
*/
page?: number,
/**
* The number of results per page
*/
perPage?: number,
/**
* The query string to search for
*/
query?: string,
}): CancelablePromise<PaginatedResults_GraphExecutionState_> {
return __request(OpenAPI, {
method: 'GET',
url: '/api/v1/sessions/',
@ -120,10 +118,10 @@ query?: string,
* @throws ApiError
*/
public static createSession({
requestBody,
}: {
requestBody?: Graph,
}): CancelablePromise<GraphExecutionState> {
requestBody,
}: {
requestBody?: Graph,
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'POST',
url: '/api/v1/sessions/',
@ -143,13 +141,13 @@ requestBody?: Graph,
* @throws ApiError
*/
public static getSession({
sessionId,
}: {
/**
* The id of the session to get
*/
sessionId: string,
}): CancelablePromise<GraphExecutionState> {
sessionId,
}: {
/**
* The id of the session to get
*/
sessionId: string,
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'GET',
url: '/api/v1/sessions/{session_id}',
@ -170,15 +168,15 @@ sessionId: string,
* @throws ApiError
*/
public static addNode({
sessionId,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
requestBody: (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | CompelInvocation | LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CvInpaintInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | DynamicPromptInvocation | RestoreFaceInvocation | UpscaleInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | ImageToImageInvocation | LatentsToLatentsInvocation | InpaintInvocation),
}): CancelablePromise<string> {
sessionId,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | DynamicPromptInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | UpscaleInvocation | RestoreFaceInvocation | InpaintInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation),
}): CancelablePromise<string> {
return __request(OpenAPI, {
method: 'POST',
url: '/api/v1/sessions/{session_id}/nodes',
@ -202,20 +200,20 @@ requestBody: (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation |
* @throws ApiError
*/
public static updateNode({
sessionId,
nodePath,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The path to the node in the graph
*/
nodePath: string,
requestBody: (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | CompelInvocation | LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CvInpaintInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | DynamicPromptInvocation | RestoreFaceInvocation | UpscaleInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | ImageToImageInvocation | LatentsToLatentsInvocation | InpaintInvocation),
}): CancelablePromise<GraphExecutionState> {
sessionId,
nodePath,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The path to the node in the graph
*/
nodePath: string,
requestBody: (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | DynamicPromptInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | UpscaleInvocation | RestoreFaceInvocation | InpaintInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation),
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'PUT',
url: '/api/v1/sessions/{session_id}/nodes/{node_path}',
@ -240,18 +238,18 @@ requestBody: (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation |
* @throws ApiError
*/
public static deleteNode({
sessionId,
nodePath,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The path to the node to delete
*/
nodePath: string,
}): CancelablePromise<GraphExecutionState> {
sessionId,
nodePath,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The path to the node to delete
*/
nodePath: string,
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'DELETE',
url: '/api/v1/sessions/{session_id}/nodes/{node_path}',
@ -274,15 +272,15 @@ nodePath: string,
* @throws ApiError
*/
public static addEdge({
sessionId,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
requestBody: Edge,
}): CancelablePromise<GraphExecutionState> {
sessionId,
requestBody,
}: {
/**
* The id of the session
*/
sessionId: string,
requestBody: Edge,
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'POST',
url: '/api/v1/sessions/{session_id}/edges',
@ -306,33 +304,33 @@ requestBody: Edge,
* @throws ApiError
*/
public static deleteEdge({
sessionId,
fromNodeId,
fromField,
toNodeId,
toField,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The id of the node the edge is coming from
*/
fromNodeId: string,
/**
* The field of the node the edge is coming from
*/
fromField: string,
/**
* The id of the node the edge is going to
*/
toNodeId: string,
/**
* The field of the node the edge is going to
*/
toField: string,
}): CancelablePromise<GraphExecutionState> {
sessionId,
fromNodeId,
fromField,
toNodeId,
toField,
}: {
/**
* The id of the session
*/
sessionId: string,
/**
* The id of the node the edge is coming from
*/
fromNodeId: string,
/**
* The field of the node the edge is coming from
*/
fromField: string,
/**
* The id of the node the edge is going to
*/
toNodeId: string,
/**
* The field of the node the edge is going to
*/
toField: string,
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'DELETE',
url: '/api/v1/sessions/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}',
@ -358,18 +356,18 @@ toField: string,
* @throws ApiError
*/
public static invokeSession({
sessionId,
all = false,
}: {
/**
* The id of the session to invoke
*/
sessionId: string,
/**
* Whether or not to invoke all remaining invocations
*/
all?: boolean,
}): CancelablePromise<any> {
sessionId,
all = false,
}: {
/**
* The id of the session to invoke
*/
sessionId: string,
/**
* Whether or not to invoke all remaining invocations
*/
all?: boolean,
}): CancelablePromise<any> {
return __request(OpenAPI, {
method: 'PUT',
url: '/api/v1/sessions/{session_id}/invoke',
@ -394,13 +392,13 @@ all?: boolean,
* @throws ApiError
*/
public static cancelSessionInvoke({
sessionId,
}: {
/**
* The id of the session to cancel
*/
sessionId: string,
}): CancelablePromise<any> {
sessionId,
}: {
/**
* The id of the session to cancel
*/
sessionId: string,
}): CancelablePromise<any> {
return __request(OpenAPI, {
method: 'DELETE',
url: '/api/v1/sessions/{session_id}/invoke',
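
Like the models service, these hunks only re-indent the destructured parameters. For orientation, the typical lifecycle with this client — a sketch, assuming the import path:

import { SessionsService } from 'services/api';
import type { Graph } from 'services/api';

// Create a session from a graph, then run everything queued in it
const runGraph = async (graph: Graph): Promise<string> => {
  const session = await SessionsService.createSession({ requestBody: graph });
  await SessionsService.invokeSession({ sessionId: session.id, all: true });
  return session.id;
};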