feat: Add Scale Before Processing To Canvas Txt2Img / Img2Img (w/ SDXL)

Repository: https://github.com/invoke-ai/InvokeAI
Commit: 71c3955530 (parent: 3f8d17d6b7)

This commit wires the canvas "scale before processing" setting into the linear txt2img and img2img graph builders, including their SDXL variants. When the bounding box scale method is 'auto' or 'manual', noise and latents are generated at the scaled bounding box dimensions, an intermediate latents-to-image node decodes the result, and the canvas output node becomes an image resize that scales the decoded image back to the bounding box size. The metadata accumulator records the dimensions actually used, and the image resize invocation gains an optional, UI-hidden core metadata field.
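Every builder touched below repeats the same dimension-selection pattern. The following is a minimal, illustrative TypeScript sketch of that pattern on its own, not code from the commit; the canvas state field names come from the hunks, while the helper function, its local types, and the 'none' scale method value are assumptions.

// Illustrative sketch only: mirrors the pattern repeated in the hunks below.
// `boundingBoxScaleMethod`, `boundingBoxDimensions` and
// `scaledBoundingBoxDimensions` are the canvas state fields shown in the diff.
type Dimensions = { width: number; height: number };
type BoundingBoxScaleMethod = 'none' | 'auto' | 'manual'; // 'none' is assumed

const getGenerationDimensions = (
  boundingBoxScaleMethod: BoundingBoxScaleMethod,
  boundingBoxDimensions: Dimensions,
  scaledBoundingBoxDimensions: Dimensions
): Dimensions => {
  // 'auto' and 'manual' mean "scale before processing" is active
  const isUsingScaledDimensions = ['auto', 'manual'].includes(
    boundingBoxScaleMethod
  );
  // generate at the scaled size; the output is later resized back to the
  // bounding box size by an `img_resize` canvas output node
  return isUsingScaledDimensions
    ? scaledBoundingBoxDimensions
    : boundingBoxDimensions;
};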
@@ -375,6 +375,11 @@ class ImageResizeInvocation(BaseInvocation):
     width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)")
     height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)")
     resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")
+    metadata: CoreMetadata = InputField(
+        default=None,
+        description=FieldDescriptions.core_metadata,
+        ui_hidden=True,
+    )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)
@@ -31,6 +31,11 @@ export const addVAEToGraph = (
   modelLoaderNodeId: string = MAIN_MODEL_LOADER
 ): void => {
   const { vae } = state.generation;
+  const { boundingBoxScaleMethod } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
+
   const isAutoVae = !vae;
   const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as

@@ -77,7 +82,7 @@ export const addVAEToGraph = (
       field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
     },
     destination: {
-      node_id: CANVAS_OUTPUT,
+      node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
       field: 'vae',
     },
   });
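The second hunk is the key routing change: when scaled dimensions are active, the VAE must feed the intermediate LATENTS_TO_IMAGE decode node instead of CANVAS_OUTPUT, because CANVAS_OUTPUT becomes a plain image resize. A self-contained sketch of that conditional edge follows; the Edge shape, the VAE_LOADER id, and the LATENTS_TO_IMAGE constant value are stand-ins, not the app's real types or constants.

// Illustrative stand-ins; the real builder uses NonNullableGraph and the ids
// exported from './constants'.
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};

const LATENTS_TO_IMAGE = 'latents_to_image'; // assumed constant value
const CANVAS_OUTPUT = 'canvas_output'; // value shown in the constants hunk below
const VAE_LOADER = 'vae_loader'; // hypothetical source node id for this sketch

const connectVae = (edges: Edge[], isUsingScaledDimensions: boolean): void => {
  edges.push({
    source: { node_id: VAE_LOADER, field: 'vae' },
    destination: {
      // scaled path: decode via LATENTS_TO_IMAGE, then resize into CANVAS_OUTPUT
      node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
      field: 'vae',
    },
  });
};

The image-to-image builder below makes the matching change on the graph side, adding the LATENTS_TO_IMAGE and resize nodes that this edge targets.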
@@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
 import { RootState } from 'app/store/store';
 import { NonNullableGraph } from 'features/nodes/types/types';
 import { initialGenerationState } from 'features/parameters/store/generationSlice';
-import {
-  ImageDTO,
-  ImageResizeInvocation,
-  ImageToLatentsInvocation,
-} from 'services/api/types';
+import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
 import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
 import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
 import { addLoRAsToGraph } from './addLoRAsToGraph';

@@ -19,12 +15,13 @@ import {
   CLIP_SKIP,
   DENOISE_LATENTS,
   IMAGE_TO_LATENTS,
+  IMG2IMG_RESIZE,
+  LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
   POSITIVE_CONDITIONING,
-  RESIZE,
 } from './constants';
 
 /**

@@ -43,6 +40,7 @@ export const buildCanvasImageToImageGraph = (
     scheduler,
     steps,
     img2imgStrength: strength,
+    vaePrecision,
     clipSkip,
     shouldUseCpuNoise,
     shouldUseNoiseSettings,

@@ -51,7 +49,15 @@ export const buildCanvasImageToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
 
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
 
   if (!model) {
     log.error('No model found in state');

@@ -104,15 +110,17 @@ export const buildCanvasImageToImageGraph = (
       id: NOISE,
       is_intermediate: true,
       use_cpu,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
     },
     [IMAGE_TO_LATENTS]: {
       type: 'i2l',
       id: IMAGE_TO_LATENTS,
       is_intermediate: true,
-      // must be set manually later, bc `fit` parameter may require a resize node inserted
-      // image: {
-      //   image_name: initialImage.image_name,
-      // },
     },
     [DENOISE_LATENTS]: {
       type: 'denoise_latents',

@@ -214,8 +222,77 @@ export const buildCanvasImageToImageGraph = (
         field: 'latents',
       },
     },
-    // Decode the denoised latents to an image
+  ],
+};
+
+// Decode Latents To Image & Handle Scaled Before Processing
+if (isUsingScaledDimensions) {
+  graph.nodes[IMG2IMG_RESIZE] = {
+    id: IMG2IMG_RESIZE,
+    type: 'img_resize',
+    is_intermediate: true,
+    image: initialImage,
+    width: scaledBoundingBoxDimensions.width,
+    height: scaledBoundingBoxDimensions.height,
+  };
+  graph.nodes[LATENTS_TO_IMAGE] = {
+    id: LATENTS_TO_IMAGE,
+    type: 'l2i',
+    is_intermediate: true,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+  graph.nodes[CANVAS_OUTPUT] = {
+    id: CANVAS_OUTPUT,
+    type: 'img_resize',
+    is_intermediate: !shouldAutoSave,
+    width: width,
+    height: height,
+  };
+
+  graph.edges.push(
     {
+      source: {
+        node_id: IMG2IMG_RESIZE,
+        field: 'image',
+      },
+      destination: {
+        node_id: IMAGE_TO_LATENTS,
+        field: 'image',
+      },
+    },
+    {
+      source: {
+        node_id: DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'latents',
+      },
+    },
+    {
+      source: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'image',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'image',
+      },
+    }
+  );
+} else {
+  graph.nodes[CANVAS_OUTPUT] = {
+    type: 'l2i',
+    id: CANVAS_OUTPUT,
+    is_intermediate: !shouldAutoSave,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
+    initialImage;
+
+  graph.edges.push({
     source: {
       node_id: DENOISE_LATENTS,
       field: 'latents',

@@ -224,73 +301,6 @@ export const buildCanvasImageToImageGraph = (
       node_id: CANVAS_OUTPUT,
       field: 'latents',
     },
-      },
-    ],
-  };
-
-  // handle `fit`
-  if (initialImage.width !== width || initialImage.height !== height) {
-    // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
-
-    // Create a resize node, explicitly setting its image
-    const resizeNode: ImageResizeInvocation = {
-      id: RESIZE,
-      type: 'img_resize',
-      image: {
-        image_name: initialImage.image_name,
-      },
-      is_intermediate: true,
-      width,
-      height,
-    };
-
-    graph.nodes[RESIZE] = resizeNode;
-
-    // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'image' },
-      destination: {
-        node_id: IMAGE_TO_LATENTS,
-        field: 'image',
-      },
-    });
-
-    // The `RESIZE` node also passes its width and height to `NOISE`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
-  } else {
-    // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
-    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
-      image_name: initialImage.image_name,
-    };
-
-    // Pass the image's dimensions to the `NOISE` node
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
   });
 }
 

@@ -300,8 +310,10 @@ export const buildCanvasImageToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'img2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
     positive_prompt: '', // set in addDynamicPromptsToGraph
     negative_prompt: negativePrompt,
     model,
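With scaled dimensions, the img2img path now runs: upscale the init image to the scaled bounding box, encode, denoise, decode, then downscale back to the user-visible bounding box. A compact sketch of that edge chain with stand-in types; the DENOISE_LATENTS and LATENTS_TO_IMAGE string values are assumptions, while the other ids appear in the constants hunk near the end of this commit.

type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};

// 'img2img_resize', 'image_to_latents' and 'canvas_output' appear in the
// constants hunk; the other two values are assumed for this sketch.
const IMG2IMG_RESIZE = 'img2img_resize';
const IMAGE_TO_LATENTS = 'image_to_latents';
const DENOISE_LATENTS = 'denoise_latents';
const LATENTS_TO_IMAGE = 'latents_to_image';
const CANVAS_OUTPUT = 'canvas_output';

// init image -> upscale -> i2l -> denoise -> l2i -> downscale to bounding box
const scaledImg2ImgEdges: Edge[] = [
  {
    source: { node_id: IMG2IMG_RESIZE, field: 'image' },
    destination: { node_id: IMAGE_TO_LATENTS, field: 'image' },
  },
  {
    source: { node_id: DENOISE_LATENTS, field: 'latents' },
    destination: { node_id: LATENTS_TO_IMAGE, field: 'latents' },
  },
  {
    source: { node_id: LATENTS_TO_IMAGE, field: 'image' },
    destination: { node_id: CANVAS_OUTPUT, field: 'image' },
  },
];

The SDXL image-to-image builder below receives the same restructuring, with SDXL_DENOISE_LATENTS in place of DENOISE_LATENTS.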
@@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
 import { RootState } from 'app/store/store';
 import { NonNullableGraph } from 'features/nodes/types/types';
 import { initialGenerationState } from 'features/parameters/store/generationSlice';
-import {
-  ImageDTO,
-  ImageResizeInvocation,
-  ImageToLatentsInvocation,
-} from 'services/api/types';
+import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
 import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
 import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
 import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';

@@ -17,11 +13,12 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph';
 import {
   CANVAS_OUTPUT,
   IMAGE_TO_LATENTS,
+  IMG2IMG_RESIZE,
+  LATENTS_TO_IMAGE,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
   POSITIVE_CONDITIONING,
-  RESIZE,
   SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
   SDXL_DENOISE_LATENTS,
   SDXL_MODEL_LOADER,

@@ -59,7 +56,15 @@ export const buildCanvasSDXLImageToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
 
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
 
   if (!model) {
     log.error('No model found in state');

@@ -109,16 +114,18 @@ export const buildCanvasSDXLImageToImageGraph = (
       id: NOISE,
       is_intermediate: true,
       use_cpu,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
     },
     [IMAGE_TO_LATENTS]: {
       type: 'i2l',
       id: IMAGE_TO_LATENTS,
       is_intermediate: true,
       fp32: vaePrecision === 'fp32' ? true : false,
-      // must be set manually later, bc `fit` parameter may require a resize node inserted
-      // image: {
-      //   image_name: initialImage.image_name,
-      // },
     },
     [SDXL_DENOISE_LATENTS]: {
       type: 'denoise_latents',

@@ -132,12 +139,6 @@ export const buildCanvasSDXLImageToImageGraph = (
         : 1 - strength,
       denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
     },
-    [CANVAS_OUTPUT]: {
-      type: 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-      fp32: vaePrecision === 'fp32' ? true : false,
-    },
   },
   edges: [
     // Connect Model Loader To UNet & CLIP

@@ -232,8 +233,77 @@ export const buildCanvasSDXLImageToImageGraph = (
         field: 'latents',
       },
     },
-    // Decode denoised latents to an image
+  ],
+};
+
+// Decode Latents To Image & Handle Scaled Before Processing
+if (isUsingScaledDimensions) {
+  graph.nodes[IMG2IMG_RESIZE] = {
+    id: IMG2IMG_RESIZE,
+    type: 'img_resize',
+    is_intermediate: true,
+    image: initialImage,
+    width: scaledBoundingBoxDimensions.width,
+    height: scaledBoundingBoxDimensions.height,
+  };
+  graph.nodes[LATENTS_TO_IMAGE] = {
+    id: LATENTS_TO_IMAGE,
+    type: 'l2i',
+    is_intermediate: true,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+  graph.nodes[CANVAS_OUTPUT] = {
+    id: CANVAS_OUTPUT,
+    type: 'img_resize',
+    is_intermediate: !shouldAutoSave,
+    width: width,
+    height: height,
+  };
+
+  graph.edges.push(
     {
+      source: {
+        node_id: IMG2IMG_RESIZE,
+        field: 'image',
+      },
+      destination: {
+        node_id: IMAGE_TO_LATENTS,
+        field: 'image',
+      },
+    },
+    {
+      source: {
+        node_id: SDXL_DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'latents',
+      },
+    },
+    {
+      source: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'image',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'image',
+      },
+    }
+  );
+} else {
+  graph.nodes[CANVAS_OUTPUT] = {
+    type: 'l2i',
+    id: CANVAS_OUTPUT,
+    is_intermediate: !shouldAutoSave,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
+    initialImage;
+
+  graph.edges.push({
     source: {
       node_id: SDXL_DENOISE_LATENTS,
       field: 'latents',

@@ -242,73 +312,6 @@ export const buildCanvasSDXLImageToImageGraph = (
       node_id: CANVAS_OUTPUT,
       field: 'latents',
     },
-      },
-    ],
-  };
-
-  // handle `fit`
-  if (initialImage.width !== width || initialImage.height !== height) {
-    // The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
-
-    // Create a resize node, explicitly setting its image
-    const resizeNode: ImageResizeInvocation = {
-      id: RESIZE,
-      type: 'img_resize',
-      image: {
-        image_name: initialImage.image_name,
-      },
-      is_intermediate: true,
-      width,
-      height,
-    };
-
-    graph.nodes[RESIZE] = resizeNode;
-
-    // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'image' },
-      destination: {
-        node_id: IMAGE_TO_LATENTS,
-        field: 'image',
-      },
-    });
-
-    // The `RESIZE` node also passes its width and height to `NOISE`
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-
-    graph.edges.push({
-      source: { node_id: RESIZE, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
-    });
-  } else {
-    // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
-    (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
-      image_name: initialImage.image_name,
-    };
-
-    // Pass the image's dimensions to the `NOISE` node
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
-      destination: {
-        node_id: NOISE,
-        field: 'width',
-      },
-    });
-    graph.edges.push({
-      source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
-      destination: {
-        node_id: NOISE,
-        field: 'height',
-      },
   });
 }
 

@@ -318,8 +321,10 @@ export const buildCanvasSDXLImageToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'img2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
    positive_prompt: '', // set in addDynamicPromptsToGraph
     negative_prompt: negativePrompt,
     model,
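One SDXL-specific detail visible in the context lines above: the denoise node's window is derived from the img2img strength and, when the refiner is enabled, its hand-off point. A simplified sketch of that relationship follows; the wrapper function is illustrative only, and the refiner's influence on denoising_start is omitted here (the real builder may also factor refinerStart into the start value).

// Illustrative only: mirrors the `: 1 - strength` and `denoising_end` context
// lines in the hunk above; the function wrapper is not part of the commit.
const sdxlDenoiseWindow = (
  strength: number, // img2imgStrength, 0..1
  shouldUseSDXLRefiner: boolean,
  refinerStart: number // fraction of the schedule where the refiner takes over
) => ({
  // img2img keeps (1 - strength) of the schedule, so e.g. strength 0.7
  // starts denoising at 0.3
  denoising_start: 1 - strength,
  // with a refiner, the base model stops where the refiner begins
  denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
});

The two text-to-image builders below apply the same scale-before-processing changes, minus the initial-image resize node.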
@@ -15,6 +15,7 @@ import { addVAEToGraph } from './addVAEToGraph';
 import { addWatermarkerToGraph } from './addWatermarkerToGraph';
 import {
   CANVAS_OUTPUT,
+  LATENTS_TO_IMAGE,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,

@@ -49,7 +50,15 @@ export const buildCanvasSDXLTextToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
 
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
 
   const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } =
     state.sdxl;

@@ -136,17 +145,15 @@ export const buildCanvasSDXLTextToImageGraph = (
       type: 'noise',
       id: NOISE,
       is_intermediate: true,
-      width,
-      height,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
       use_cpu,
     },
     [t2lNode.id]: t2lNode,
-    [CANVAS_OUTPUT]: {
-      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-      fp32: vaePrecision === 'fp32' ? true : false,
-    },
   },
   edges: [
     // Connect Model Loader to UNet and CLIP

@@ -231,8 +238,57 @@ export const buildCanvasSDXLTextToImageGraph = (
         field: 'noise',
       },
     },
-    // Decode Denoised Latents To Image
+  ],
+};
+
+// Decode Latents To Image & Handle Scaled Before Processing
+if (isUsingScaledDimensions) {
+  graph.nodes[LATENTS_TO_IMAGE] = {
+    id: LATENTS_TO_IMAGE,
+    type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+    is_intermediate: true,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  graph.nodes[CANVAS_OUTPUT] = {
+    id: CANVAS_OUTPUT,
+    type: 'img_resize',
+    is_intermediate: !shouldAutoSave,
+    width: width,
+    height: height,
+  };
+
+  graph.edges.push(
     {
+      source: {
+        node_id: SDXL_DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'latents',
+      },
+    },
+    {
+      source: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'image',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'image',
+      },
+    }
+  );
+} else {
+  graph.nodes[CANVAS_OUTPUT] = {
+    type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+    id: CANVAS_OUTPUT,
+    is_intermediate: !shouldAutoSave,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  graph.edges.push({
     source: {
       node_id: SDXL_DENOISE_LATENTS,
       field: 'latents',

@@ -241,9 +297,8 @@ export const buildCanvasSDXLTextToImageGraph = (
       node_id: CANVAS_OUTPUT,
       field: 'latents',
     },
-      },
-    ],
-  };
+  });
+}
 
 // add metadata accumulator, which is only mostly populated - some fields are added later
 graph.nodes[METADATA_ACCUMULATOR] = {

@@ -251,8 +306,10 @@ export const buildCanvasSDXLTextToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'txt2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
     positive_prompt: '', // set in addDynamicPromptsToGraph
     negative_prompt: negativePrompt,
     model,
@@ -17,6 +17,7 @@ import {
   CANVAS_TEXT_TO_IMAGE_GRAPH,
   CLIP_SKIP,
   DENOISE_LATENTS,
+  LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,

@@ -39,6 +40,7 @@ export const buildCanvasTextToImageGraph = (
     cfgScale: cfg_scale,
     scheduler,
     steps,
+    vaePrecision,
     clipSkip,
     shouldUseCpuNoise,
     shouldUseNoiseSettings,

@@ -47,7 +49,15 @@ export const buildCanvasTextToImageGraph = (
   // The bounding box determines width and height, not the width and height params
   const { width, height } = state.canvas.boundingBoxDimensions;
 
-  const { shouldAutoSave } = state.canvas;
+  const {
+    scaledBoundingBoxDimensions,
+    boundingBoxScaleMethod,
+    shouldAutoSave,
+  } = state.canvas;
+
+  const isUsingScaledDimensions = ['auto', 'manual'].includes(
+    boundingBoxScaleMethod
+  );
 
   if (!model) {
     log.error('No model found in state');

@@ -131,16 +141,15 @@ export const buildCanvasTextToImageGraph = (
       type: 'noise',
       id: NOISE,
       is_intermediate: true,
-      width,
-      height,
+      width: !isUsingScaledDimensions
+        ? width
+        : scaledBoundingBoxDimensions.width,
+      height: !isUsingScaledDimensions
+        ? height
+        : scaledBoundingBoxDimensions.height,
       use_cpu,
     },
     [t2lNode.id]: t2lNode,
-    [CANVAS_OUTPUT]: {
-      type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
-      id: CANVAS_OUTPUT,
-      is_intermediate: !shouldAutoSave,
-    },
   },
   edges: [
     // Connect Model Loader to UNet & CLIP Skip

@@ -216,8 +225,57 @@ export const buildCanvasTextToImageGraph = (
         field: 'noise',
       },
     },
-    // Decode denoised latents to image
+  ],
+};
+
+// Decode Latents To Image & Handle Scaled Before Processing
+if (isUsingScaledDimensions) {
+  graph.nodes[LATENTS_TO_IMAGE] = {
+    id: LATENTS_TO_IMAGE,
+    type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+    is_intermediate: true,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  graph.nodes[CANVAS_OUTPUT] = {
+    id: CANVAS_OUTPUT,
+    type: 'img_resize',
+    is_intermediate: !shouldAutoSave,
+    width: width,
+    height: height,
+  };
+
+  graph.edges.push(
     {
+      source: {
+        node_id: DENOISE_LATENTS,
+        field: 'latents',
+      },
+      destination: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'latents',
+      },
+    },
+    {
+      source: {
+        node_id: LATENTS_TO_IMAGE,
+        field: 'image',
+      },
+      destination: {
+        node_id: CANVAS_OUTPUT,
+        field: 'image',
+      },
+    }
+  );
+} else {
+  graph.nodes[CANVAS_OUTPUT] = {
+    type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
+    id: CANVAS_OUTPUT,
+    is_intermediate: !shouldAutoSave,
+    fp32: vaePrecision === 'fp32' ? true : false,
+  };
+
+  graph.edges.push({
     source: {
       node_id: DENOISE_LATENTS,
       field: 'latents',

@@ -226,9 +284,8 @@ export const buildCanvasTextToImageGraph = (
       node_id: CANVAS_OUTPUT,
       field: 'latents',
     },
-      },
-    ],
-  };
+  });
+}
 
 // add metadata accumulator, which is only mostly populated - some fields are added later
 graph.nodes[METADATA_ACCUMULATOR] = {

@@ -236,8 +293,10 @@ export const buildCanvasTextToImageGraph = (
     type: 'metadata_accumulator',
     generation_mode: 'txt2img',
     cfg_scale,
-    height,
-    width,
+    width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
+    height: !isUsingScaledDimensions
+      ? height
+      : scaledBoundingBoxDimensions.height,
     positive_prompt: '', // set in addDynamicPromptsToGraph
     negative_prompt: negativePrompt,
     model,
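Across both text-to-image builders, the change to the output node is the same: on the scaled path CANVAS_OUTPUT becomes an img_resize that scales the decoded image back down to the bounding box and the decode moves to an intermediate LATENTS_TO_IMAGE node; on the unscaled path CANVAS_OUTPUT remains the decode node itself. A sketch of that decision with loose stand-in types (the 'fp16' | 'fp32' union for vaePrecision is an assumption):

// Illustrative sketch of the two canvas output shapes used above; these are
// stand-in types, not the app's real invocation types.
type CanvasOutputNode =
  | { id: string; type: 'l2i' | 'l2i_onnx'; is_intermediate: boolean; fp32?: boolean }
  | { id: string; type: 'img_resize'; is_intermediate: boolean; width: number; height: number };

const buildCanvasOutput = (opts: {
  isUsingScaledDimensions: boolean;
  isUsingOnnxModel: boolean;
  shouldAutoSave: boolean;
  vaePrecision: 'fp16' | 'fp32'; // assumed union
  width: number; // bounding box size the canvas expects back
  height: number;
}): CanvasOutputNode =>
  opts.isUsingScaledDimensions
    ? {
        // scaled path: decode happens in LATENTS_TO_IMAGE; the output node
        // only resizes the decoded image back down to the bounding box
        id: 'canvas_output',
        type: 'img_resize',
        is_intermediate: !opts.shouldAutoSave,
        width: opts.width,
        height: opts.height,
      }
    : {
        // unscaled path: the output node decodes the latents directly
        id: 'canvas_output',
        type: opts.isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
        is_intermediate: !opts.shouldAutoSave,
        fp32: opts.vaePrecision === 'fp32',
      };

The constants and generated schema hunks that follow add the IMG2IMG_RESIZE id and the new metadata and VAE fields these graphs rely on.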
@@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip';
 export const IMAGE_TO_LATENTS = 'image_to_latents';
 export const LATENTS_TO_LATENTS = 'latents_to_latents';
 export const RESIZE = 'resize_image';
+export const IMG2IMG_RESIZE = 'img2img_resize';
 export const CANVAS_OUTPUT = 'canvas_output';
 export const INPAINT_IMAGE = 'inpaint_image';
 export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image';
@@ -1606,6 +1606,11 @@ export type components = {
        * @enum {string}
        */
       type: "create_denoise_mask";
+      /**
+       * Vae
+       * @description VAE
+       */
+      vae?: components["schemas"]["VaeField"];
       /**
        * Image
        * @description Image which will be masked

@@ -1616,11 +1621,6 @@ export type components = {
        * @description The mask to use when pasting
        */
       mask?: components["schemas"]["ImageField"];
-      /**
-       * Vae
-       * @description VAE
-       */
-      vae?: components["schemas"]["VaeField"];
       /**
        * Tiled
        * @description Processing using overlapping tiles (reduce memory consumption)

@@ -2995,6 +2995,11 @@ export type components = {
        * @enum {string}
        */
       resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
+      /**
+       * Metadata
+       * @description Optional core metadata to be written to image
+       */
+      metadata?: components["schemas"]["CoreMetadata"];
     };
     /**
      * Image Saturation Adjustment

@@ -6407,18 +6412,6 @@ export type components = {
      * @enum {string}
      */
     StableDiffusionOnnxModelFormat: "olive" | "onnx";
-    /**
-     * StableDiffusionXLModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
-    /**
-     * StableDiffusion1ModelFormat
-     * @description An enumeration.
-     * @enum {string}
-     */
-    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
     /**
      * ControlNetModelFormat
      * @description An enumeration.

@@ -6431,6 +6424,18 @@ export type components = {
      * @enum {string}
      */
     StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
+    /**
+     * StableDiffusionXLModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
+    /**
+     * StableDiffusion1ModelFormat
+     * @description An enumeration.
+     * @enum {string}
+     */
+    StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
   };
   responses: never;
   parameters: never;
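The backend hunk at the top and the generated-schema hunk above expose an optional metadata input on the image resize invocation, so a graph can attach core metadata to a resized output. A minimal sketch of what an img_resize node carrying metadata could look like, using a loose local type rather than the generated components schema:

// Loose stand-in for the generated ImageResizeInvocation shape; the field
// names come from the hunks above, the concrete values are examples only.
type CoreMetadataLike = Record<string, unknown>;

type ImageResizeNode = {
  id: string;
  type: 'img_resize';
  is_intermediate?: boolean;
  image?: { image_name: string };
  width?: number;
  height?: number;
  resample_mode?: 'nearest' | 'box' | 'bilinear' | 'hamming' | 'bicubic' | 'lanczos';
  metadata?: CoreMetadataLike; // newly exposed, hidden in the UI
};

const resizeWithMetadata: ImageResizeNode = {
  id: 'canvas_output',
  type: 'img_resize',
  is_intermediate: false,
  width: 512, // bounding box size to resize back to
  height: 512,
  resample_mode: 'bicubic',
  metadata: { generation_mode: 'img2img' }, // example payload only
};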