feat: Add Scale Before Processing To Canvas Txt2Img / Img2Img (w/ SDXL)

blessedcoolant 2023-08-27 08:26:23 +12:00
parent 3f8d17d6b7
commit 71c3955530
8 changed files with 350 additions and 201 deletions
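At a glance: when the canvas bounding box has Scale Before Processing set to auto or manual, these builders now generate noise and denoise at the scaled bounding-box size, then decode and resize the result back to the requested bounding-box size. A minimal sketch of the shared decision, using the state shape visible in this diff (the helper is illustrative, not part of the commit):

    // Illustrative sketch only: the builders inline this logic rather than
    // sharing a helper. 'none' | 'auto' | 'manual' mirrors the canvas slice's
    // boundingBoxScaleMethod values used throughout this diff.
    type BoundingBoxScaleMethod = 'none' | 'auto' | 'manual';

    interface Dimensions {
      width: number;
      height: number;
    }

    const getGenerationDimensions = (
      scaleMethod: BoundingBoxScaleMethod,
      boundingBox: Dimensions,
      scaledBoundingBox: Dimensions
    ): Dimensions => {
      // Same predicate each builder computes as `isUsingScaledDimensions`
      const isUsingScaledDimensions = ['auto', 'manual'].includes(scaleMethod);
      return isUsingScaledDimensions ? scaledBoundingBox : boundingBox;
    };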

View File

@@ -375,6 +375,11 @@ class ImageResizeInvocation(BaseInvocation):
     width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)")
     height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)")
     resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")
+    metadata: CoreMetadata = InputField(
+        default=None,
+        description=FieldDescriptions.core_metadata,
+        ui_hidden=True,
+    )

     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)
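The new `metadata` field matters here because, with scaling active, the final canvas image is produced by an `img_resize` node rather than an `l2i` node; passing core metadata through the resize keeps generation parameters on the saved image. A hedged sketch of the wiring (the exact edge is an assumption, based on how the metadata accumulator feeds other output nodes in these builders):

    // Assumed wiring: route the accumulated metadata into the final resize
    // node so the saved image retains its generation parameters. Field names
    // follow the schema addition in this commit (ImageResizeInvocation.metadata).
    graph.edges.push({
      source: { node_id: METADATA_ACCUMULATOR, field: 'metadata' },
      destination: { node_id: CANVAS_OUTPUT, field: 'metadata' },
    });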

View File

@@ -31,6 +31,11 @@ export const addVAEToGraph = (
   modelLoaderNodeId: string = MAIN_MODEL_LOADER
 ): void => {
   const { vae } = state.generation;
+  const { boundingBoxScaleMethod } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
   const isAutoVae = !vae;

   const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
@@ -77,7 +82,7 @@ export const addVAEToGraph = (
       field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
     },
     destination: {
-      node_id: CANVAS_OUTPUT,
+      node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
       field: 'vae',
     },
   });
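The rerouting above follows from the node-type change: with scaled dimensions, `CANVAS_OUTPUT` becomes an `img_resize` node, which has no `vae` input; the decode step that needs the VAE is the intermediate `LATENTS_TO_IMAGE` node. Condensed:

    // Condensed form of the destination choice in this hunk: the VAE must
    // feed whichever node actually decodes latents to an image.
    const vaeDestinationNodeId = isUsingScaledDimensions
      ? LATENTS_TO_IMAGE // scaled: l2i is intermediate; CANVAS_OUTPUT is img_resize
      : CANVAS_OUTPUT; // unscaled: CANVAS_OUTPUT is itself the l2i node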

View File

@@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
 import { RootState } from 'app/store/store';
 import { NonNullableGraph } from 'features/nodes/types/types';
 import { initialGenerationState } from 'features/parameters/store/generationSlice';
-import {
-  ImageDTO,
-  ImageResizeInvocation,
-  ImageToLatentsInvocation,
-} from 'services/api/types';
+import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
 import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
 import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
 import { addLoRAsToGraph } from './addLoRAsToGraph';
@@ -19,12 +15,13 @@ import {
   CLIP_SKIP,
   DENOISE_LATENTS,
   IMAGE_TO_LATENTS,
+  IMG2IMG_RESIZE,
+  LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
   POSITIVE_CONDITIONING,
-  RESIZE,
 } from './constants';

 /**
@@ -43,6 +40,7 @@ export const buildCanvasImageToImageGraph = (
     scheduler,
     steps,
     img2imgStrength: strength,
+    vaePrecision,
     clipSkip,
     shouldUseCpuNoise,
     shouldUseNoiseSettings,
@@ -51,7 +49,15 @@ export const buildCanvasImageToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );

   if (!model) {
     log.error('No model found in state');
@@ -104,15 +110,17 @@ export const buildCanvasImageToImageGraph = (
       id: NOISE,
       is_intermediate: true,
       use_cpu,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
     },
     [IMAGE_TO_LATENTS]: {
       type: 'i2l',
       id: IMAGE_TO_LATENTS,
       is_intermediate: true,
-      // must be set manually later, bc `fit` parameter may require a resize node inserted
-      // image: {
-      //   image_name: initialImage.image_name,
-      // },
     },
     [DENOISE_LATENTS]: {
       type: 'denoise_latents',
@@ -214,82 +222,84 @@ export const buildCanvasImageToImageGraph = (
           field: 'latents',
         },
       },
-      // Decode the denoised latents to an image
-      {
-        source: {
-          node_id: DENOISE_LATENTS,
-          field: 'latents',
-        },
-        destination: {
-          node_id: CANVAS_OUTPUT,
-          field: 'latents',
-        },
-      },
     ],
   };

-  // handle `fit`
-  if (initialImage.width !== width || initialImage.height !== height) {
-    // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
-
-    // Create a resize node, explicitly setting its image
-    const resizeNode: ImageResizeInvocation = {
-      id: RESIZE,
-      type: 'img_resize',
-      image: {
-        image_name: initialImage.image_name,
-      },
-      is_intermediate: true,
-      width,
-      height,
-    };
-
-    graph.nodes[RESIZE] = resizeNode;
-
-    // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'image' },
-      destination: {
-        node_id: IMAGE_TO_LATENTS,
-        field: 'image',
-      },
-    });
-
-    // The `RESIZE` node also passes its width and height to `NOISE`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
+  // Decode Latents To Image & Handle Scaled Before Processing
+  if (isUsingScaledDimensions) {
+    graph.nodes[IMG2IMG_RESIZE] = {
+      id: IMG2IMG_RESIZE,
+      type: 'img_resize',
+      is_intermediate: true,
+      image: initialImage,
+      width: scaledBoundingBoxDimensions.width,
+      height: scaledBoundingBoxDimensions.height,
+    };
+    graph.nodes[LATENTS_TO_IMAGE] = {
+      id: LATENTS_TO_IMAGE,
+      type: 'l2i',
+      is_intermediate: true,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      id: CANVAS_OUTPUT,
+      type: 'img_resize',
+      is_intermediate: !shouldAutoSave,
+      width: width,
+      height: height,
+    };
+
+    graph.edges.push(
+      {
+        source: {
+          node_id: IMG2IMG_RESIZE,
+          field: 'image',
+        },
+        destination: {
+          node_id: IMAGE_TO_LATENTS,
+          field: 'image',
+        },
+      },
+      {
+        source: {
+          node_id: DENOISE_LATENTS,
+          field: 'latents',
+        },
+        destination: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'latents',
+        },
+      },
+      {
+        source: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'image',
+        },
+        destination: {
+          node_id: CANVAS_OUTPUT,
+          field: 'image',
+        },
+      }
+    );
   } else {
-    // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
-    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
-      image_name: initialImage.image_name,
-    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      type: 'l2i',
+      id: CANVAS_OUTPUT,
+      is_intermediate: !shouldAutoSave,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };

-    // Pass the image's dimensions to the `NOISE` node
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
+    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
+      initialImage;
+
+    graph.edges.push({
+      source: {
+        node_id: DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'latents',
+      },
+    });
   }
@@ -300,8 +310,10 @@ export const buildCanvasImageToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'img2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
     positive_prompt: '', // set in addDynamicPromptsToGraph
     negative_prompt: negativePrompt,
     model,
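Put together, the scaled img2img path is a five-node chain; the unscaled path keeps the old shape, with `CANVAS_OUTPUT` as the `l2i` decode node. A readable summary (names are the constants from this diff):

    // Edge hops assembled by the scaled branch above, in execution order.
    const scaledImg2ImgChain: Array<[string, string]> = [
      [IMG2IMG_RESIZE, IMAGE_TO_LATENTS], // resize init image to the scaled size
      [IMAGE_TO_LATENTS, DENOISE_LATENTS], // encode, then denoise at scaled size
      [DENOISE_LATENTS, LATENTS_TO_IMAGE], // decode scaled latents (intermediate)
      [LATENTS_TO_IMAGE, CANVAS_OUTPUT], // img_resize back to bounding-box size
    ];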

View File

@@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
 import { RootState } from 'app/store/store';
 import { NonNullableGraph } from 'features/nodes/types/types';
 import { initialGenerationState } from 'features/parameters/store/generationSlice';
-import {
-  ImageDTO,
-  ImageResizeInvocation,
-  ImageToLatentsInvocation,
-} from 'services/api/types';
+import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
 import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
 import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
 import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
@@ -17,11 +13,12 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph';
 import {
   CANVAS_OUTPUT,
   IMAGE_TO_LATENTS,
+  IMG2IMG_RESIZE,
+  LATENTS_TO_IMAGE,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
   POSITIVE_CONDITIONING,
-  RESIZE,
   SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
   SDXL_DENOISE_LATENTS,
   SDXL_MODEL_LOADER,
@@ -59,7 +56,15 @@ export const buildCanvasSDXLImageToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );

   if (!model) {
     log.error('No model found in state');
@@ -109,16 +114,18 @@ export const buildCanvasSDXLImageToImageGraph = (
       id: NOISE,
       is_intermediate: true,
       use_cpu,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
     },
     [IMAGE_TO_LATENTS]: {
       type: 'i2l',
       id: IMAGE_TO_LATENTS,
       is_intermediate: true,
       fp32: vaePrecision === 'fp32' ? true : false,
-      // must be set manually later, bc `fit` parameter may require a resize node inserted
-      // image: {
-      //   image_name: initialImage.image_name,
-      // },
     },
     [SDXL_DENOISE_LATENTS]: {
       type: 'denoise_latents',
@@ -132,12 +139,6 @@ export const buildCanvasSDXLImageToImageGraph = (
         : 1 - strength,
       denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
     },
-    [CANVAS_OUTPUT]: {
-      type: 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-      fp32: vaePrecision === 'fp32' ? true : false,
-    },
   },
   edges: [
     // Connect Model Loader To UNet & CLIP
@@ -232,82 +233,84 @@ export const buildCanvasSDXLImageToImageGraph = (
           field: 'latents',
         },
       },
-      // Decode denoised latents to an image
-      {
-        source: {
-          node_id: SDXL_DENOISE_LATENTS,
-          field: 'latents',
-        },
-        destination: {
-          node_id: CANVAS_OUTPUT,
-          field: 'latents',
-        },
-      },
     ],
   };

-  // handle `fit`
-  if (initialImage.width !== width || initialImage.height !== height) {
-    // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
-
-    // Create a resize node, explicitly setting its image
-    const resizeNode: ImageResizeInvocation = {
-      id: RESIZE,
-      type: 'img_resize',
-      image: {
-        image_name: initialImage.image_name,
-      },
-      is_intermediate: true,
-      width,
-      height,
-    };
-
-    graph.nodes[RESIZE] = resizeNode;
-
-    // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'image' },
-      destination: {
-        node_id: IMAGE_TO_LATENTS,
-        field: 'image',
-      },
-    });
-
-    // The `RESIZE` node also passes its width and height to `NOISE`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
+  // Decode Latents To Image & Handle Scaled Before Processing
+  if (isUsingScaledDimensions) {
+    graph.nodes[IMG2IMG_RESIZE] = {
+      id: IMG2IMG_RESIZE,
+      type: 'img_resize',
+      is_intermediate: true,
+      image: initialImage,
+      width: scaledBoundingBoxDimensions.width,
+      height: scaledBoundingBoxDimensions.height,
+    };
+    graph.nodes[LATENTS_TO_IMAGE] = {
+      id: LATENTS_TO_IMAGE,
+      type: 'l2i',
+      is_intermediate: true,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      id: CANVAS_OUTPUT,
+      type: 'img_resize',
+      is_intermediate: !shouldAutoSave,
+      width: width,
+      height: height,
+    };
+
+    graph.edges.push(
+      {
+        source: {
+          node_id: IMG2IMG_RESIZE,
+          field: 'image',
+        },
+        destination: {
+          node_id: IMAGE_TO_LATENTS,
+          field: 'image',
+        },
+      },
+      {
+        source: {
+          node_id: SDXL_DENOISE_LATENTS,
+          field: 'latents',
+        },
+        destination: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'latents',
+        },
+      },
+      {
+        source: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'image',
+        },
+        destination: {
+          node_id: CANVAS_OUTPUT,
+          field: 'image',
+        },
+      }
+    );
   } else {
-    // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
-    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
-      image_name: initialImage.image_name,
-    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      type: 'l2i',
+      id: CANVAS_OUTPUT,
+      is_intermediate: !shouldAutoSave,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };

-    // Pass the image's dimensions to the `NOISE` node
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
+    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
+      initialImage;
+
+    graph.edges.push({
+      source: {
+        node_id: SDXL_DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'latents',
+      },
+    });
   }
@@ -318,8 +321,10 @@ export const buildCanvasSDXLImageToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'img2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
    positive_prompt: '', // set in addDynamicPromptsToGraph
    negative_prompt: negativePrompt,
    model,
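One detail shared by every `l2i` node these builders create: the `fp32` flag is derived from the user's `vaePrecision` setting. A trivial restatement (the union type is an assumption; the builders inline the comparison):

    // Assumed union type for the setting; the builders inline the ternary
    // `vaePrecision === 'fp32' ? true : false` on each l2i node they create.
    type VaePrecision = 'fp16' | 'fp32';

    const shouldUseFp32Vae = (vaePrecision: VaePrecision): boolean =>
      vaePrecision === 'fp32';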

View File

@@ -15,6 +15,7 @@ import { addVAEToGraph } from './addVAEToGraph';
 import { addWatermarkerToGraph } from './addWatermarkerToGraph';
 import {
   CANVAS_OUTPUT,
+  LATENTS_TO_IMAGE,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -49,7 +50,15 @@ export const buildCanvasSDXLTextToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );

   const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } =
     state.sdxl;
@@ -136,17 +145,15 @@ export const buildCanvasSDXLTextToImageGraph = (
       type: 'noise',
       id: NOISE,
       is_intermediate: true,
-      width,
-      height,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
       use_cpu,
     },
     [t2lNode.id]: t2lNode,
-    [CANVAS_OUTPUT]: {
-      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-      fp32: vaePrecision === 'fp32' ? true : false,
-    },
   },
   edges: [
     // Connect Model Loader to UNet and CLIP
@@ -231,19 +238,67 @@ export const buildCanvasSDXLTextToImageGraph = (
         field: 'noise',
       },
     },
-    // Decode Denoised Latents To Image
-    {
-      source: {
-        node_id: SDXL_DENOISE_LATENTS,
-        field: 'latents',
-      },
-      destination: {
-        node_id: CANVAS_OUTPUT,
-        field: 'latents',
-      },
-    },
   ],
 };

+  // Decode Latents To Image & Handle Scaled Before Processing
+  if (isUsingScaledDimensions) {
+    graph.nodes[LATENTS_TO_IMAGE] = {
+      id: LATENTS_TO_IMAGE,
+      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+      is_intermediate: true,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      id: CANVAS_OUTPUT,
+      type: 'img_resize',
+      is_intermediate: !shouldAutoSave,
+      width: width,
+      height: height,
+    };
+
+    graph.edges.push(
+      {
+        source: {
+          node_id: SDXL_DENOISE_LATENTS,
+          field: 'latents',
+        },
+        destination: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'latents',
+        },
+      },
+      {
+        source: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'image',
+        },
+        destination: {
+          node_id: CANVAS_OUTPUT,
+          field: 'image',
+        },
+      }
+    );
+  } else {
+    graph.nodes[CANVAS_OUTPUT] = {
+      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+      id: CANVAS_OUTPUT,
+      is_intermediate: !shouldAutoSave,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+
+    graph.edges.push({
+      source: {
+        node_id: SDXL_DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'latents',
+      },
+    });
+  }

 // add metadata accumulator, which is only mostly populated - some fields are added later
 graph.nodes[METADATA_ACCUMULATOR] = {
@@ -251,8 +306,10 @@ export const buildCanvasSDXLTextToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'txt2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
    positive_prompt: '', // set in addDynamicPromptsToGraph
    negative_prompt: negativePrompt,
    model,
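The txt2img builders need no initial-image resize: noise is generated directly at the scaled size, so only the decoded output is resized back. Summarized with this file's constants:

    // Edge hops assembled by the scaled txt2img branch above.
    const scaledTxt2ImgChain: Array<[string, string]> = [
      [NOISE, SDXL_DENOISE_LATENTS], // noise created at scaledBoundingBoxDimensions
      [SDXL_DENOISE_LATENTS, LATENTS_TO_IMAGE], // decode at the scaled size
      [LATENTS_TO_IMAGE, CANVAS_OUTPUT], // img_resize back to width/height
    ];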

View File

@@ -17,6 +17,7 @@ import {
   CANVAS_TEXT_TO_IMAGE_GRAPH,
   CLIP_SKIP,
   DENOISE_LATENTS,
+  LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
@@ -39,6 +40,7 @@ export const buildCanvasTextToImageGraph = (
     cfgScale: cfg_scale,
     scheduler,
     steps,
+    vaePrecision,
     clipSkip,
     shouldUseCpuNoise,
     shouldUseNoiseSettings,
@@ -47,7 +49,15 @@ export const buildCanvasTextToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );

   if (!model) {
     log.error('No model found in state');
@@ -131,16 +141,15 @@ export const buildCanvasTextToImageGraph = (
       type: 'noise',
       id: NOISE,
       is_intermediate: true,
-      width,
-      height,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
       use_cpu,
     },
     [t2lNode.id]: t2lNode,
-    [CANVAS_OUTPUT]: {
-      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-    },
   },
   edges: [
     // Connect Model Loader to UNet & CLIP Skip
@@ -216,19 +225,67 @@ export const buildCanvasTextToImageGraph = (
        field: 'noise',
      },
    },
-    // Decode denoised latents to image
-    {
-      source: {
-        node_id: DENOISE_LATENTS,
-        field: 'latents',
-      },
-      destination: {
-        node_id: CANVAS_OUTPUT,
-        field: 'latents',
-      },
-    },
   ],
 };

+  // Decode Latents To Image & Handle Scaled Before Processing
+  if (isUsingScaledDimensions) {
+    graph.nodes[LATENTS_TO_IMAGE] = {
+      id: LATENTS_TO_IMAGE,
+      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+      is_intermediate: true,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+    graph.nodes[CANVAS_OUTPUT] = {
+      id: CANVAS_OUTPUT,
+      type: 'img_resize',
+      is_intermediate: !shouldAutoSave,
+      width: width,
+      height: height,
+    };
+
+    graph.edges.push(
+      {
+        source: {
+          node_id: DENOISE_LATENTS,
+          field: 'latents',
+        },
+        destination: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'latents',
+        },
+      },
+      {
+        source: {
+          node_id: LATENTS_TO_IMAGE,
+          field: 'image',
+        },
+        destination: {
+          node_id: CANVAS_OUTPUT,
+          field: 'image',
+        },
+      }
+    );
+  } else {
+    graph.nodes[CANVAS_OUTPUT] = {
+      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+      id: CANVAS_OUTPUT,
+      is_intermediate: !shouldAutoSave,
+      fp32: vaePrecision === 'fp32' ? true : false,
+    };
+
+    graph.edges.push({
+      source: {
+        node_id: DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'latents',
+      },
+    });
+  }

 // add metadata accumulator, which is only mostly populated - some fields are added later
 graph.nodes[METADATA_ACCUMULATOR] = {
@@ -236,8 +293,10 @@ export const buildCanvasTextToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'txt2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
    positive_prompt: '', // set in addDynamicPromptsToGraph
    negative_prompt: negativePrompt,
    model,

View File

@@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip';
 export const IMAGE_TO_LATENTS = 'image_to_latents';
 export const LATENTS_TO_LATENTS = 'latents_to_latents';
 export const RESIZE = 'resize_image';
+export const IMG2IMG_RESIZE = 'img2img_resize';
 export const CANVAS_OUTPUT = 'canvas_output';
 export const INPAINT_IMAGE = 'inpaint_image';
 export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image';

View File

@@ -1606,6 +1606,11 @@ export type components = {
        * @enum {string}
        */
       type: "create_denoise_mask";
+      /**
+       * Vae
+       * @description VAE
+       */
+      vae?: components["schemas"]["VaeField"];
       /**
        * Image
        * @description Image which will be masked
@@ -1616,11 +1621,6 @@ export type components = {
        * @description The mask to use when pasting
        */
       mask?: components["schemas"]["ImageField"];
-      /**
-       * Vae
-       * @description VAE
-       */
-      vae?: components["schemas"]["VaeField"];
       /**
        * Tiled
        * @description Processing using overlapping tiles (reduce memory consumption)
@@ -2995,6 +2995,11 @@
        * @enum {string}
        */
       resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
+      /**
+       * Metadata
+       * @description Optional core metadata to be written to image
+       */
+      metadata?: components["schemas"]["CoreMetadata"];
     };
     /**
      * Image Saturation Adjustment
@@ -6407,18 +6412,6 @@
      * @enum {string}
      */
     StableDiffusionOnnxModelFormat: "olive" | "onnx";
-    /**
-     * StableDiffusionXLModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
-    /**
-     * StableDiffusion1ModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
     /**
      * ControlNetModelFormat
      * @description An enumeration.
@@ -6431,6 +6424,18 @@
      * @enum {string}
      */
     StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
+    /**
+     * StableDiffusionXLModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
+    /**
+     * StableDiffusion1ModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
   };
   responses: never;
   parameters: never;