Mirror of https://github.com/invoke-ai/InvokeAI
Commit: Merge branch 'main' into nextprevcurrentimagenode
@@ -10,6 +10,7 @@ import ColorInputField from './inputs/ColorInputField';
import ConditioningInputField from './inputs/ConditioningInputField';
import ControlInputField from './inputs/ControlInputField';
import ControlNetModelInputField from './inputs/ControlNetModelInputField';
import DenoiseMaskInputField from './inputs/DenoiseMaskInputField';
import EnumInputField from './inputs/EnumInputField';
import ImageCollectionInputField from './inputs/ImageCollectionInputField';
import ImageInputField from './inputs/ImageInputField';
@@ -105,6 +106,19 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => {
    );
  }

  if (
    field?.type === 'DenoiseMaskField' &&
    fieldTemplate?.type === 'DenoiseMaskField'
  ) {
    return (
      <DenoiseMaskInputField
        nodeId={nodeId}
        field={field}
        fieldTemplate={fieldTemplate}
      />
    );
  }

  if (
    field?.type === 'ConditioningField' &&
    fieldTemplate?.type === 'ConditioningField'
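For context, the renderer only picks a field component when the field value's `type` and the field template's `type` agree. A minimal standalone sketch of that double-check dispatch (the value and template types here are simplified stand-ins, not the project's real definitions, and a string is returned instead of JSX to keep it self-contained):

```ts
// Simplified sketch of the type-narrowing dispatch used by InputFieldRenderer.
type DenoiseMaskValue = { type: 'DenoiseMaskField'; value?: { mask_name: string } };
type ConditioningValue = { type: 'ConditioningField'; value?: unknown };
type FieldValue = DenoiseMaskValue | ConditioningValue;
type FieldTemplate = { type: FieldValue['type'] };

const renderField = (field?: FieldValue, template?: FieldTemplate): string => {
  // Both the runtime value and the template must agree on the field type,
  // otherwise no editor is rendered for this input.
  if (field?.type === 'DenoiseMaskField' && template?.type === 'DenoiseMaskField') {
    return 'DenoiseMaskInputField';
  }
  if (field?.type === 'ConditioningField' && template?.type === 'ConditioningField') {
    return 'ConditioningInputField';
  }
  return 'unknown field type';
};

console.log(renderField({ type: 'DenoiseMaskField' }, { type: 'DenoiseMaskField' }));
```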
@@ -0,0 +1,17 @@
import {
  DenoiseMaskInputFieldTemplate,
  DenoiseMaskInputFieldValue,
  FieldComponentProps,
} from 'features/nodes/types/types';
import { memo } from 'react';

const DenoiseMaskInputFieldComponent = (
  _props: FieldComponentProps<
    DenoiseMaskInputFieldValue,
    DenoiseMaskInputFieldTemplate
  >
) => {
  return null;
};

export default memo(DenoiseMaskInputFieldComponent);
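The new component deliberately renders nothing, presumably because a denoise mask has no inline editor and is only ever supplied over a node connection. A small sketch of how the `FieldComponentProps` generic pairs a value type with its template type (the props shape below is an assumed simplification of the real one):

```ts
// Stand-in for the FieldComponentProps generic: it ties a field's runtime
// value type to its template type, so a component can only be written for
// a matching value/template pair.
type FieldComponentProps<V, T> = {
  nodeId: string;
  field: V;
  fieldTemplate: T;
};

type DenoiseMaskInputFieldValue = { type: 'DenoiseMaskField'; value?: { mask_name: string } };
type DenoiseMaskInputFieldTemplate = { type: 'DenoiseMaskField'; default: undefined };

// Rendering null: connection-only fields show a handle but no widget.
const DenoiseMaskInputFieldComponent = (
  _props: FieldComponentProps<DenoiseMaskInputFieldValue, DenoiseMaskInputFieldTemplate>
) => null;
```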
@@ -59,6 +59,11 @@ export const FIELDS: Record<FieldType, FieldUIConfig> = {
    description: 'Images may be passed between nodes.',
    color: 'purple.500',
  },
  DenoiseMaskField: {
    title: 'Denoise Mask',
    description: 'Denoise Mask may be passed between nodes',
    color: 'red.700',
  },
  LatentsField: {
    title: 'Latents',
    description: 'Latents may be passed between nodes.',
  },
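The `FIELDS` map drives the per-type UI: title, tooltip description, and handle color. A hedged sketch of how such an entry might be consumed (the `handleColor` helper and the fallback color are hypothetical, only the `DenoiseMaskField` entry comes from this diff):

```ts
// Minimal stand-in for the FieldUIConfig lookup.
type FieldUIConfig = { title: string; description: string; color: string };

const FIELDS: Record<string, FieldUIConfig> = {
  DenoiseMaskField: {
    title: 'Denoise Mask',
    description: 'Denoise Mask may be passed between nodes',
    color: 'red.700',
  },
};

// e.g. the node editor could color an input handle from this config
const handleColor = (fieldType: string): string =>
  FIELDS[fieldType]?.color ?? 'gray.500';

console.log(handleColor('DenoiseMaskField')); // 'red.700'
```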
@@ -64,6 +64,7 @@ export const zFieldType = z.enum([
  'string',
  'array',
  'ImageField',
  'DenoiseMaskField',
  'LatentsField',
  'ConditioningField',
  'ControlField',
@@ -120,6 +121,7 @@ export type InputFieldTemplate =
  | StringInputFieldTemplate
  | BooleanInputFieldTemplate
  | ImageInputFieldTemplate
  | DenoiseMaskInputFieldTemplate
  | LatentsInputFieldTemplate
  | ConditioningInputFieldTemplate
  | UNetInputFieldTemplate
@@ -205,6 +207,12 @@ export const zConditioningField = z.object({
});
export type ConditioningField = z.infer<typeof zConditioningField>;

export const zDenoiseMaskField = z.object({
  mask_name: z.string().trim().min(1),
  masked_latents_name: z.string().trim().min(1).optional(),
});
export type DenoiseMaskFieldValue = z.infer<typeof zDenoiseMaskField>;

export const zIntegerInputFieldValue = zInputFieldValueBase.extend({
  type: z.literal('integer'),
  value: z.number().optional(),
@@ -241,6 +249,14 @@ export const zLatentsInputFieldValue = zInputFieldValueBase.extend({
});
export type LatentsInputFieldValue = z.infer<typeof zLatentsInputFieldValue>;

export const zDenoiseMaskInputFieldValue = zInputFieldValueBase.extend({
  type: z.literal('DenoiseMaskField'),
  value: zDenoiseMaskField.optional(),
});
export type DenoiseMaskInputFieldValue = z.infer<
  typeof zDenoiseMaskInputFieldValue
>;

export const zConditioningInputFieldValue = zInputFieldValueBase.extend({
  type: z.literal('ConditioningField'),
  value: zConditioningField.optional(),
@@ -459,6 +475,7 @@ export const zInputFieldValue = z.discriminatedUnion('type', [
  zBooleanInputFieldValue,
  zImageInputFieldValue,
  zLatentsInputFieldValue,
  zDenoiseMaskInputFieldValue,
  zConditioningInputFieldValue,
  zUNetInputFieldValue,
  zClipInputFieldValue,
@@ -532,6 +549,11 @@ export type ImageCollectionInputFieldTemplate = InputFieldTemplateBase & {
  type: 'ImageCollection';
};

export type DenoiseMaskInputFieldTemplate = InputFieldTemplateBase & {
  default: undefined;
  type: 'DenoiseMaskField';
};

export type LatentsInputFieldTemplate = InputFieldTemplateBase & {
  default: string;
  type: 'LatentsField';
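The new zod schemas give the denoise-mask value runtime validation on top of the static types. A small sketch using the real zod API, with the schemas copied from this diff but without the `zInputFieldValueBase` fields (id, name, etc.) for brevity:

```ts
import { z } from 'zod';

// Schemas as added in this diff (base fields omitted).
const zDenoiseMaskField = z.object({
  mask_name: z.string().trim().min(1),
  masked_latents_name: z.string().trim().min(1).optional(),
});

const zDenoiseMaskInputFieldValue = z.object({
  type: z.literal('DenoiseMaskField'),
  value: zDenoiseMaskField.optional(),
});

// A value arriving over a connection can be validated before use:
const parsed = zDenoiseMaskInputFieldValue.safeParse({
  type: 'DenoiseMaskField',
  value: { mask_name: 'mask_1' },
});
console.log(parsed.success); // true — masked_latents_name is optional

// An empty mask_name fails the min(1) check:
console.log(zDenoiseMaskField.safeParse({ mask_name: '' }).success); // false
```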
@@ -8,6 +8,7 @@ import {
  ConditioningInputFieldTemplate,
  ControlInputFieldTemplate,
  ControlNetModelInputFieldTemplate,
  DenoiseMaskInputFieldTemplate,
  EnumInputFieldTemplate,
  FieldType,
  FloatInputFieldTemplate,
@@ -263,6 +264,19 @@ const buildImageCollectionInputFieldTemplate = ({
  return template;
};

const buildDenoiseMaskInputFieldTemplate = ({
  schemaObject,
  baseField,
}: BuildInputFieldArg): DenoiseMaskInputFieldTemplate => {
  const template: DenoiseMaskInputFieldTemplate = {
    ...baseField,
    type: 'DenoiseMaskField',
    default: schemaObject.default ?? undefined,
  };

  return template;
};

const buildLatentsInputFieldTemplate = ({
  schemaObject,
  baseField,
@@ -498,6 +512,12 @@ export const buildInputFieldTemplate = (
      baseField,
    });
  }
  if (fieldType === 'DenoiseMaskField') {
    return buildDenoiseMaskInputFieldTemplate({
      schemaObject: fieldSchema,
      baseField,
    });
  }
  if (fieldType === 'LatentsField') {
    return buildLatentsInputFieldTemplate({
      schemaObject: fieldSchema,
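The new builder simply copies the base field and pins the type; the dispatch in `buildInputFieldTemplate` routes `DenoiseMaskField` schemas to it. A compact, self-contained sketch under simplified types (`BuildInputFieldArg` and the template types below are stand-ins for the real ones):

```ts
// Simplified sketch of the template builder added in this diff.
type InputFieldTemplateBase = { name: string; title: string };
type DenoiseMaskInputFieldTemplate = InputFieldTemplateBase & {
  type: 'DenoiseMaskField';
  default: undefined;
};
type BuildInputFieldArg = {
  schemaObject: { default?: undefined };
  baseField: InputFieldTemplateBase;
};

const buildDenoiseMaskInputFieldTemplate = ({
  schemaObject,
  baseField,
}: BuildInputFieldArg): DenoiseMaskInputFieldTemplate => ({
  ...baseField,
  type: 'DenoiseMaskField',
  // denoise masks have no meaningful schema default; fall back to undefined
  default: schemaObject.default ?? undefined,
});

const template = buildDenoiseMaskInputFieldTemplate({
  schemaObject: {},
  baseField: { name: 'denoise_mask', title: 'Denoise Mask' },
});
console.log(template.type); // 'DenoiseMaskField'
```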
@@ -49,6 +49,10 @@ export const buildInputFieldValue = (
    fieldValue.value = [];
  }

  if (template.type === 'DenoiseMaskField') {
    fieldValue.value = undefined;
  }

  if (template.type === 'LatentsField') {
    fieldValue.value = undefined;
  }

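Connection-only field types start with no value at all, unlike collections which start as an empty array. A minimal sketch of that initial-value rule (types simplified):

```ts
// Sketch of the initial-value rule added here: DenoiseMaskField and
// LatentsField begin with value undefined; ImageCollection begins as [].
type Template = { type: 'DenoiseMaskField' | 'LatentsField' | 'ImageCollection' };
type FieldValue = { value?: unknown };

const initialValueFor = (template: Template): FieldValue => {
  const fieldValue: FieldValue = {};
  if (template.type === 'ImageCollection') {
    fieldValue.value = [];
  }
  if (template.type === 'DenoiseMaskField' || template.type === 'LatentsField') {
    fieldValue.value = undefined;
  }
  return fieldValue;
};

console.log(initialValueFor({ type: 'DenoiseMaskField' })); // { value: undefined }
```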
@@ -9,6 +9,7 @@ import {
  CANVAS_TEXT_TO_IMAGE_GRAPH,
  IMAGE_TO_IMAGE_GRAPH,
  IMAGE_TO_LATENTS,
  INPAINT_CREATE_MASK,
  INPAINT_IMAGE,
  LATENTS_TO_IMAGE,
  MAIN_MODEL_LOADER,
@@ -30,6 +31,11 @@ export const addVAEToGraph = (
  modelLoaderNodeId: string = MAIN_MODEL_LOADER
): void => {
  const { vae } = state.generation;
  const { boundingBoxScaleMethod } = state.canvas;

  const isUsingScaledDimensions = ['auto', 'manual'].includes(
    boundingBoxScaleMethod
  );

  const isAutoVae = !vae;
  const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
@@ -76,7 +82,7 @@ export const addVAEToGraph = (
        field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
      },
      destination: {
        node_id: CANVAS_OUTPUT,
        node_id: isUsingScaledDimensions ? LATENTS_TO_IMAGE : CANVAS_OUTPUT,
        field: 'vae',
      },
    });
@@ -117,6 +123,16 @@ export const addVAEToGraph = (
          field: 'vae',
        },
      },
      {
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
          field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
        },
        destination: {
          node_id: INPAINT_CREATE_MASK,
          field: 'vae',
        },
      },
      {
        source: {
          node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER,
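The VAE source now also feeds the new `create_denoise_mask` node, and the decode target switches to the intermediate `LATENTS_TO_IMAGE` node when scaled dimensions are in use. Edges throughout these builders are plain `{ source, destination }` records; a self-contained sketch of the shape (node id string values are assumed for illustration):

```ts
// Minimal sketch of the edge shape used by the graph builders.
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type Graph = { nodes: Record<string, unknown>; edges: Edge[] };

const MAIN_MODEL_LOADER = 'main_model_loader'; // assumed value
const INPAINT_CREATE_MASK = 'inpaint_create_mask';

const graph: Graph = { nodes: {}, edges: [] };

// The change above adds an edge like this so create_denoise_mask receives
// the same VAE as the rest of the pipeline:
graph.edges.push({
  source: { node_id: MAIN_MODEL_LOADER, field: 'vae' },
  destination: { node_id: INPAINT_CREATE_MASK, field: 'vae' },
});
```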
@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import { initialGenerationState } from 'features/parameters/store/generationSlice';
|
||||
import {
|
||||
ImageDTO,
|
||||
ImageResizeInvocation,
|
||||
ImageToLatentsInvocation,
|
||||
} from 'services/api/types';
|
||||
import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
|
||||
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
|
||||
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
|
||||
import { addLoRAsToGraph } from './addLoRAsToGraph';
|
||||
@ -19,12 +15,13 @@ import {
|
||||
CLIP_SKIP,
|
||||
DENOISE_LATENTS,
|
||||
IMAGE_TO_LATENTS,
|
||||
IMG2IMG_RESIZE,
|
||||
LATENTS_TO_IMAGE,
|
||||
MAIN_MODEL_LOADER,
|
||||
METADATA_ACCUMULATOR,
|
||||
NEGATIVE_CONDITIONING,
|
||||
NOISE,
|
||||
POSITIVE_CONDITIONING,
|
||||
RESIZE,
|
||||
} from './constants';
|
||||
|
||||
/**
|
||||
@ -43,6 +40,7 @@ export const buildCanvasImageToImageGraph = (
|
||||
scheduler,
|
||||
steps,
|
||||
img2imgStrength: strength,
|
||||
vaePrecision,
|
||||
clipSkip,
|
||||
shouldUseCpuNoise,
|
||||
shouldUseNoiseSettings,
|
||||
@ -51,7 +49,15 @@ export const buildCanvasImageToImageGraph = (
|
||||
// The bounding box determines width and height, not the width and height params
|
||||
const { width, height } = state.canvas.boundingBoxDimensions;
|
||||
|
||||
const { shouldAutoSave } = state.canvas;
|
||||
const {
|
||||
scaledBoundingBoxDimensions,
|
||||
boundingBoxScaleMethod,
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
if (!model) {
|
||||
log.error('No model found in state');
|
||||
@ -104,15 +110,17 @@ export const buildCanvasImageToImageGraph = (
|
||||
id: NOISE,
|
||||
is_intermediate: true,
|
||||
use_cpu,
|
||||
width: !isUsingScaledDimensions
|
||||
? width
|
||||
: scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
},
|
||||
[IMAGE_TO_LATENTS]: {
|
||||
type: 'i2l',
|
||||
id: IMAGE_TO_LATENTS,
|
||||
is_intermediate: true,
|
||||
// must be set manually later, bc `fit` parameter may require a resize node inserted
|
||||
// image: {
|
||||
// image_name: initialImage.image_name,
|
||||
// },
|
||||
},
|
||||
[DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -214,82 +222,84 @@ export const buildCanvasImageToImageGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Decode the denoised latents to an image
|
||||
],
|
||||
};
|
||||
|
||||
// Decode Latents To Image & Handle Scaled Before Processing
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.nodes[IMG2IMG_RESIZE] = {
|
||||
id: IMG2IMG_RESIZE,
|
||||
type: 'img_resize',
|
||||
is_intermediate: true,
|
||||
image: initialImage,
|
||||
width: scaledBoundingBoxDimensions.width,
|
||||
height: scaledBoundingBoxDimensions.height,
|
||||
};
|
||||
graph.nodes[LATENTS_TO_IMAGE] = {
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
type: 'img_resize',
|
||||
is_intermediate: !shouldAutoSave,
|
||||
width: width,
|
||||
height: height,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
{
|
||||
source: {
|
||||
node_id: IMG2IMG_RESIZE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: IMAGE_TO_LATENTS,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
// handle `fit`
|
||||
if (initialImage.width !== width || initialImage.height !== height) {
|
||||
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
|
||||
|
||||
// Create a resize node, explicitly setting its image
|
||||
const resizeNode: ImageResizeInvocation = {
|
||||
id: RESIZE,
|
||||
type: 'img_resize',
|
||||
image: {
|
||||
image_name: initialImage.image_name,
|
||||
},
|
||||
is_intermediate: true,
|
||||
width,
|
||||
height,
|
||||
};
|
||||
|
||||
graph.nodes[RESIZE] = resizeNode;
|
||||
|
||||
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'image' },
|
||||
destination: {
|
||||
node_id: IMAGE_TO_LATENTS,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
|
||||
// The `RESIZE` node also passes its width and height to `NOISE`
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'width' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'width',
|
||||
},
|
||||
});
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'height' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'height',
|
||||
},
|
||||
});
|
||||
{
|
||||
source: {
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'image',
|
||||
},
|
||||
}
|
||||
);
|
||||
} else {
|
||||
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
|
||||
image_name: initialImage.image_name,
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
type: 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
// Pass the image's dimensions to the `NOISE` node
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
|
||||
initialImage;
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'width',
|
||||
source: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
graph.edges.push({
|
||||
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'height',
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
}
|
||||
@ -300,8 +310,10 @@ export const buildCanvasImageToImageGraph = (
|
||||
type: 'metadata_accumulator',
|
||||
generation_mode: 'img2img',
|
||||
cfg_scale,
|
||||
height,
|
||||
width,
|
||||
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
positive_prompt: '', // set in addDynamicPromptsToGraph
|
||||
negative_prompt: negativePrompt,
|
||||
model,
|
||||
|
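The canvas image-to-image builder now branches on the bounding-box scale method: with scaling active, the denoised latents are decoded by an intermediate `LATENTS_TO_IMAGE` (`l2i`) node and then resized back to the requested size by `CANVAS_OUTPUT` (an `img_resize` node); without scaling, `CANVAS_OUTPUT` is the decode node itself. A hedged, self-contained sketch of that wiring (types and the `wireDecode` helper are illustrative, not the project's code):

```ts
// Sketch of the scaled-vs-unscaled decode branch introduced above.
type Node = { id: string; type: string; [k: string]: unknown };
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type Graph = { nodes: Record<string, Node>; edges: Edge[] };

const DENOISE_LATENTS = 'denoise_latents'; // assumed value
const LATENTS_TO_IMAGE = 'latents_to_image'; // assumed value
const CANVAS_OUTPUT = 'canvas_output';

const wireDecode = (
  graph: Graph,
  isUsingScaledDimensions: boolean,
  width: number,
  height: number
): void => {
  if (isUsingScaledDimensions) {
    // Decode at the scaled size, then resize back to the canvas size.
    graph.nodes[LATENTS_TO_IMAGE] = { id: LATENTS_TO_IMAGE, type: 'l2i', is_intermediate: true };
    graph.nodes[CANVAS_OUTPUT] = { id: CANVAS_OUTPUT, type: 'img_resize', width, height };
    graph.edges.push(
      {
        source: { node_id: DENOISE_LATENTS, field: 'latents' },
        destination: { node_id: LATENTS_TO_IMAGE, field: 'latents' },
      },
      {
        source: { node_id: LATENTS_TO_IMAGE, field: 'image' },
        destination: { node_id: CANVAS_OUTPUT, field: 'image' },
      }
    );
  } else {
    // No scaling: CANVAS_OUTPUT decodes the latents directly.
    graph.nodes[CANVAS_OUTPUT] = { id: CANVAS_OUTPUT, type: 'l2i' };
    graph.edges.push({
      source: { node_id: DENOISE_LATENTS, field: 'latents' },
      destination: { node_id: CANVAS_OUTPUT, field: 'latents' },
    });
  }
};
```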
@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import {
|
||||
CreateDenoiseMaskInvocation,
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
@ -15,13 +16,14 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
|
||||
import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_INPAINT_GRAPH,
|
||||
CANVAS_OUTPUT,
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_INPAINT_GRAPH,
|
||||
CANVAS_OUTPUT,
|
||||
CLIP_SKIP,
|
||||
DENOISE_LATENTS,
|
||||
INPAINT_CREATE_MASK,
|
||||
INPAINT_IMAGE,
|
||||
INPAINT_IMAGE_RESIZE_DOWN,
|
||||
INPAINT_IMAGE_RESIZE_UP,
|
||||
@ -127,6 +129,12 @@ export const buildCanvasInpaintGraph = (
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[NOISE]: {
|
||||
type: 'noise',
|
||||
id: NOISE,
|
||||
@ -276,16 +284,27 @@ export const buildCanvasInpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
},
|
||||
// Iterate
|
||||
{
|
||||
source: {
|
||||
@ -459,6 +478,16 @@ export const buildCanvasInpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_IMAGE_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
@ -516,6 +545,10 @@ export const buildCanvasInpaintGraph = (
|
||||
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
|
||||
image: canvasMaskImage,
|
||||
};
|
||||
graph.nodes[INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
// Color Correct The Inpainted Result
|
||||
|
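In the inpaint builder, the blurred mask no longer feeds the denoiser directly: it feeds the new `create_denoise_mask` node, whose `denoise_mask` output is what `DENOISE_LATENTS` consumes. A short sketch of the two edges (id strings are illustrative):

```ts
// Rewiring above: MASK_BLUR -> INPAINT_CREATE_MASK -> DENOISE_LATENTS.
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};

const MASK_BLUR = 'mask_blur'; // assumed value
const INPAINT_CREATE_MASK = 'inpaint_create_mask';
const DENOISE_LATENTS = 'denoise_latents'; // assumed value

const edges: Edge[] = [
  // blurred canvas mask becomes the raw mask for create_denoise_mask
  {
    source: { node_id: MASK_BLUR, field: 'image' },
    destination: { node_id: INPAINT_CREATE_MASK, field: 'mask' },
  },
  // create_denoise_mask hands a proper denoise mask to the denoiser
  {
    source: { node_id: INPAINT_CREATE_MASK, field: 'denoise_mask' },
    destination: { node_id: DENOISE_LATENTS, field: 'denoise_mask' },
  },
];
console.log(edges.length); // 2
```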
@ -17,13 +17,14 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
|
||||
import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_OUTPAINT_GRAPH,
|
||||
CANVAS_OUTPUT,
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPAINT_GRAPH,
|
||||
CANVAS_OUTPUT,
|
||||
CLIP_SKIP,
|
||||
DENOISE_LATENTS,
|
||||
INPAINT_CREATE_MASK,
|
||||
INPAINT_IMAGE,
|
||||
INPAINT_IMAGE_RESIZE_DOWN,
|
||||
INPAINT_IMAGE_RESIZE_UP,
|
||||
@ -153,6 +154,12 @@ export const buildCanvasOutpaintGraph = (
|
||||
use_cpu,
|
||||
is_intermediate: true,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: DENOISE_LATENTS,
|
||||
@ -317,16 +324,27 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
},
|
||||
// Iterate
|
||||
{
|
||||
source: {
|
||||
@ -522,6 +540,16 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Take combined mask and resize and then blur
|
||||
{
|
||||
source: {
|
||||
@ -640,6 +668,16 @@ export const buildCanvasOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
|
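The outpaint builder adds the same `create_denoise_mask` wiring, with one difference: the infilled image (rather than the resized-up original) is what the mask node uses as its `image` input, so the mask is computed against the infilled canvas. A one-edge sketch (ids illustrative):

```ts
// Outpaint-specific edge above: the infill result feeds create_denoise_mask.
const INPAINT_INFILL = 'inpaint_infill';
const INPAINT_CREATE_MASK = 'inpaint_create_mask';

const edge = {
  source: { node_id: INPAINT_INFILL, field: 'image' },
  destination: { node_id: INPAINT_CREATE_MASK, field: 'image' },
};
console.log(edge.destination.field); // 'image'
```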
@ -2,11 +2,7 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import { initialGenerationState } from 'features/parameters/store/generationSlice';
|
||||
import {
|
||||
ImageDTO,
|
||||
ImageResizeInvocation,
|
||||
ImageToLatentsInvocation,
|
||||
} from 'services/api/types';
|
||||
import { ImageDTO, ImageToLatentsInvocation } from 'services/api/types';
|
||||
import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
|
||||
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
|
||||
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
|
||||
@ -17,11 +13,12 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_OUTPUT,
|
||||
IMAGE_TO_LATENTS,
|
||||
IMG2IMG_RESIZE,
|
||||
LATENTS_TO_IMAGE,
|
||||
METADATA_ACCUMULATOR,
|
||||
NEGATIVE_CONDITIONING,
|
||||
NOISE,
|
||||
POSITIVE_CONDITIONING,
|
||||
RESIZE,
|
||||
SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH,
|
||||
SDXL_DENOISE_LATENTS,
|
||||
SDXL_MODEL_LOADER,
|
||||
@ -59,7 +56,15 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
// The bounding box determines width and height, not the width and height params
|
||||
const { width, height } = state.canvas.boundingBoxDimensions;
|
||||
|
||||
const { shouldAutoSave } = state.canvas;
|
||||
const {
|
||||
scaledBoundingBoxDimensions,
|
||||
boundingBoxScaleMethod,
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
if (!model) {
|
||||
log.error('No model found in state');
|
||||
@ -109,16 +114,18 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
id: NOISE,
|
||||
is_intermediate: true,
|
||||
use_cpu,
|
||||
width: !isUsingScaledDimensions
|
||||
? width
|
||||
: scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
},
|
||||
[IMAGE_TO_LATENTS]: {
|
||||
type: 'i2l',
|
||||
id: IMAGE_TO_LATENTS,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
// must be set manually later, bc `fit` parameter may require a resize node inserted
|
||||
// image: {
|
||||
// image_name: initialImage.image_name,
|
||||
// },
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
@ -132,12 +139,6 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
: 1 - strength,
|
||||
denoising_end: shouldUseSDXLRefiner ? refinerStart : 1,
|
||||
},
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
// Connect Model Loader To UNet & CLIP
|
||||
@ -232,82 +233,84 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Decode denoised latents to an image
|
||||
],
|
||||
};
|
||||
|
||||
// Decode Latents To Image & Handle Scaled Before Processing
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.nodes[IMG2IMG_RESIZE] = {
|
||||
id: IMG2IMG_RESIZE,
|
||||
type: 'img_resize',
|
||||
is_intermediate: true,
|
||||
image: initialImage,
|
||||
width: scaledBoundingBoxDimensions.width,
|
||||
height: scaledBoundingBoxDimensions.height,
|
||||
};
|
||||
graph.nodes[LATENTS_TO_IMAGE] = {
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
type: 'img_resize',
|
||||
is_intermediate: !shouldAutoSave,
|
||||
width: width,
|
||||
height: height,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
{
|
||||
source: {
|
||||
node_id: IMG2IMG_RESIZE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: IMAGE_TO_LATENTS,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
// handle `fit`
|
||||
if (initialImage.width !== width || initialImage.height !== height) {
|
||||
// The init image needs to be resized to the specified width and height before being passed to `IMAGE_TO_LATENTS`
|
||||
|
||||
// Create a resize node, explicitly setting its image
|
||||
const resizeNode: ImageResizeInvocation = {
|
||||
id: RESIZE,
|
||||
type: 'img_resize',
|
||||
image: {
|
||||
image_name: initialImage.image_name,
|
||||
},
|
||||
is_intermediate: true,
|
||||
width,
|
||||
height,
|
||||
};
|
||||
|
||||
graph.nodes[RESIZE] = resizeNode;
|
||||
|
||||
// The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS`
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'image' },
|
||||
destination: {
|
||||
node_id: IMAGE_TO_LATENTS,
|
||||
field: 'image',
|
||||
},
|
||||
});
|
||||
|
||||
// The `RESIZE` node also passes its width and height to `NOISE`
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'width' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'width',
|
||||
},
|
||||
});
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: RESIZE, field: 'height' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'height',
|
||||
},
|
||||
});
|
||||
{
|
||||
source: {
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'image',
|
||||
},
|
||||
}
|
||||
);
|
||||
} else {
|
||||
// We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = {
|
||||
image_name: initialImage.image_name,
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
type: 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
// Pass the image's dimensions to the `NOISE` node
|
||||
(graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image =
|
||||
initialImage;
|
||||
|
||||
graph.edges.push({
|
||||
source: { node_id: IMAGE_TO_LATENTS, field: 'width' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'width',
|
||||
source: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
graph.edges.push({
|
||||
source: { node_id: IMAGE_TO_LATENTS, field: 'height' },
|
||||
destination: {
|
||||
node_id: NOISE,
|
||||
field: 'height',
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
}
|
||||
@ -318,8 +321,10 @@ export const buildCanvasSDXLImageToImageGraph = (
|
||||
type: 'metadata_accumulator',
|
||||
generation_mode: 'img2img',
|
||||
cfg_scale,
|
||||
height,
|
||||
width,
|
||||
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
positive_prompt: '', // set in addDynamicPromptsToGraph
|
||||
negative_prompt: negativePrompt,
|
||||
model,
|
||||
|
@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { NonNullableGraph } from 'features/nodes/types/types';
|
||||
import {
|
||||
CreateDenoiseMaskInvocation,
|
||||
ImageBlurInvocation,
|
||||
ImageDTO,
|
||||
ImageToLatentsInvocation,
|
||||
@ -16,10 +17,11 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
|
||||
import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_OUTPUT,
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPUT,
|
||||
INPAINT_CREATE_MASK,
|
||||
INPAINT_IMAGE,
|
||||
INPAINT_IMAGE_RESIZE_DOWN,
|
||||
INPAINT_IMAGE_RESIZE_UP,
|
||||
@ -136,6 +138,12 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
use_cpu,
|
||||
is_intermediate: true,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: SDXL_DENOISE_LATENTS,
|
||||
@ -290,16 +298,27 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
},
|
||||
// Iterate
|
||||
{
|
||||
source: {
|
||||
@ -473,6 +492,16 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_IMAGE_RESIZE_UP,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
@ -530,6 +559,10 @@ export const buildCanvasSDXLInpaintGraph = (
|
||||
...(graph.nodes[MASK_BLUR] as ImageBlurInvocation),
|
||||
image: canvasMaskImage,
|
||||
};
|
||||
graph.nodes[INPAINT_CREATE_MASK] = {
|
||||
...(graph.nodes[INPAINT_CREATE_MASK] as CreateDenoiseMaskInvocation),
|
||||
image: canvasInitImage,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
// Color Correct The Inpainted Result
|
||||
|
@ -18,10 +18,11 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
|
||||
import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_OUTPUT,
|
||||
CANVAS_COHERENCE_DENOISE_LATENTS,
|
||||
CANVAS_COHERENCE_NOISE,
|
||||
CANVAS_COHERENCE_NOISE_INCREMENT,
|
||||
CANVAS_OUTPUT,
|
||||
INPAINT_CREATE_MASK,
|
||||
INPAINT_IMAGE,
|
||||
INPAINT_IMAGE_RESIZE_DOWN,
|
||||
INPAINT_IMAGE_RESIZE_UP,
|
||||
@ -156,6 +157,12 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
use_cpu,
|
||||
is_intermediate: true,
|
||||
},
|
||||
[INPAINT_CREATE_MASK]: {
|
||||
type: 'create_denoise_mask',
|
||||
id: INPAINT_CREATE_MASK,
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
[SDXL_DENOISE_LATENTS]: {
|
||||
type: 'denoise_latents',
|
||||
id: SDXL_DENOISE_LATENTS,
|
||||
@ -331,16 +338,27 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
// Create Inpaint Mask
|
||||
{
|
||||
source: {
|
||||
node_id: MASK_BLUR,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'mask',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
destination: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'denoise_mask',
|
||||
},
|
||||
},
|
||||
// Iterate
|
||||
{
|
||||
source: {
|
||||
@ -537,6 +555,16 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Take combined mask and resize and then blur
|
||||
{
|
||||
source: {
|
||||
@ -655,6 +683,16 @@ export const buildCanvasSDXLOutpaintGraph = (
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
{
|
||||
source: {
|
||||
node_id: INPAINT_INFILL,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: INPAINT_CREATE_MASK,
|
||||
field: 'image',
|
||||
},
|
||||
},
|
||||
// Color Correct The Inpainted Result
|
||||
{
|
||||
source: {
|
||||
|
@ -15,6 +15,7 @@ import { addVAEToGraph } from './addVAEToGraph';
|
||||
import { addWatermarkerToGraph } from './addWatermarkerToGraph';
|
||||
import {
|
||||
CANVAS_OUTPUT,
|
||||
LATENTS_TO_IMAGE,
|
||||
METADATA_ACCUMULATOR,
|
||||
NEGATIVE_CONDITIONING,
|
||||
NOISE,
|
||||
@ -49,7 +50,15 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
// The bounding box determines width and height, not the width and height params
|
||||
const { width, height } = state.canvas.boundingBoxDimensions;
|
||||
|
||||
const { shouldAutoSave } = state.canvas;
|
||||
const {
|
||||
scaledBoundingBoxDimensions,
|
||||
boundingBoxScaleMethod,
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } =
|
||||
state.sdxl;
|
||||
@ -136,17 +145,15 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
type: 'noise',
|
||||
id: NOISE,
|
||||
is_intermediate: true,
|
||||
width,
|
||||
height,
|
||||
width: !isUsingScaledDimensions
|
||||
? width
|
||||
: scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
use_cpu,
|
||||
},
|
||||
[t2lNode.id]: t2lNode,
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
// Connect Model Loader to UNet and CLIP
|
||||
@ -231,19 +238,67 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
field: 'noise',
|
||||
},
|
||||
},
|
||||
// Decode Denoised Latents To Image
|
||||
],
|
||||
};
|
||||
|
||||
// Decode Latents To Image & Handle Scaled Before Processing
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.nodes[LATENTS_TO_IMAGE] = {
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
type: 'img_resize',
|
||||
is_intermediate: !shouldAutoSave,
|
||||
width: width,
|
||||
height: height,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
{
|
||||
source: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
{
|
||||
source: {
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'image',
|
||||
},
|
||||
}
|
||||
);
|
||||
} else {
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: SDXL_DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// add metadata accumulator, which is only mostly populated - some fields are added later
|
||||
graph.nodes[METADATA_ACCUMULATOR] = {
|
||||
@ -251,8 +306,10 @@ export const buildCanvasSDXLTextToImageGraph = (
|
||||
type: 'metadata_accumulator',
|
||||
generation_mode: 'txt2img',
|
||||
cfg_scale,
|
||||
height,
|
||||
width,
|
||||
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
positive_prompt: '', // set in addDynamicPromptsToGraph
|
||||
negative_prompt: negativePrompt,
|
||||
model,
|
||||
|
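Across the text-to-image and image-to-image builders, the metadata accumulator now records the dimensions that were actually generated, which are the scaled bounding-box dimensions when scaling is in effect, instead of the requested canvas size. A tiny sketch of that selection (helper name is illustrative):

```ts
// Record the generated dimensions: scaled bounding box when scaling is active.
const recordedDims = (
  isUsingScaledDimensions: boolean,
  width: number,
  height: number,
  scaled: { width: number; height: number }
) => ({
  width: !isUsingScaledDimensions ? width : scaled.width,
  height: !isUsingScaledDimensions ? height : scaled.height,
});

console.log(recordedDims(true, 512, 512, { width: 768, height: 768 }));
// { width: 768, height: 768 }
```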
@ -17,6 +17,7 @@ import {
|
||||
CANVAS_TEXT_TO_IMAGE_GRAPH,
|
||||
CLIP_SKIP,
|
||||
DENOISE_LATENTS,
|
||||
LATENTS_TO_IMAGE,
|
||||
MAIN_MODEL_LOADER,
|
||||
METADATA_ACCUMULATOR,
|
||||
NEGATIVE_CONDITIONING,
|
||||
@ -39,6 +40,7 @@ export const buildCanvasTextToImageGraph = (
|
||||
cfgScale: cfg_scale,
|
||||
scheduler,
|
||||
steps,
|
||||
vaePrecision,
|
||||
clipSkip,
|
||||
shouldUseCpuNoise,
|
||||
shouldUseNoiseSettings,
|
||||
@ -47,7 +49,15 @@ export const buildCanvasTextToImageGraph = (
|
||||
// The bounding box determines width and height, not the width and height params
|
||||
const { width, height } = state.canvas.boundingBoxDimensions;
|
||||
|
||||
const { shouldAutoSave } = state.canvas;
|
||||
const {
|
||||
scaledBoundingBoxDimensions,
|
||||
boundingBoxScaleMethod,
|
||||
shouldAutoSave,
|
||||
} = state.canvas;
|
||||
|
||||
const isUsingScaledDimensions = ['auto', 'manual'].includes(
|
||||
boundingBoxScaleMethod
|
||||
);
|
||||
|
||||
if (!model) {
|
||||
log.error('No model found in state');
|
||||
@ -131,16 +141,15 @@ export const buildCanvasTextToImageGraph = (
|
||||
type: 'noise',
|
||||
id: NOISE,
|
||||
is_intermediate: true,
|
||||
width,
|
||||
height,
|
||||
width: !isUsingScaledDimensions
|
||||
? width
|
||||
: scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
use_cpu,
|
||||
},
|
||||
[t2lNode.id]: t2lNode,
|
||||
[CANVAS_OUTPUT]: {
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
},
|
||||
},
|
||||
edges: [
|
||||
// Connect Model Loader to UNet & CLIP Skip
|
||||
@ -216,19 +225,67 @@ export const buildCanvasTextToImageGraph = (
|
||||
field: 'noise',
|
||||
},
|
||||
},
|
||||
// Decode denoised latents to image
|
||||
],
|
||||
};
|
||||
|
||||
// Decode Latents To Image & Handle Scaled Before Processing
|
||||
if (isUsingScaledDimensions) {
|
||||
graph.nodes[LATENTS_TO_IMAGE] = {
|
||||
id: LATENTS_TO_IMAGE,
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
is_intermediate: true,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
id: CANVAS_OUTPUT,
|
||||
type: 'img_resize',
|
||||
is_intermediate: !shouldAutoSave,
|
||||
width: width,
|
||||
height: height,
|
||||
};
|
||||
|
||||
graph.edges.push(
|
||||
{
|
||||
source: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'latents',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
{
|
||||
source: {
|
||||
node_id: LATENTS_TO_IMAGE,
|
||||
field: 'image',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'image',
|
||||
},
|
||||
}
|
||||
);
|
||||
} else {
|
||||
graph.nodes[CANVAS_OUTPUT] = {
|
||||
type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i',
|
||||
id: CANVAS_OUTPUT,
|
||||
is_intermediate: !shouldAutoSave,
|
||||
fp32: vaePrecision === 'fp32' ? true : false,
|
||||
};
|
||||
|
||||
graph.edges.push({
|
||||
source: {
|
||||
node_id: DENOISE_LATENTS,
|
||||
field: 'latents',
|
||||
},
|
||||
destination: {
|
||||
node_id: CANVAS_OUTPUT,
|
||||
field: 'latents',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// add metadata accumulator, which is only mostly populated - some fields are added later
|
||||
graph.nodes[METADATA_ACCUMULATOR] = {
|
||||
@ -236,8 +293,10 @@ export const buildCanvasTextToImageGraph = (
|
||||
type: 'metadata_accumulator',
|
||||
generation_mode: 'txt2img',
|
||||
cfg_scale,
|
||||
height,
|
||||
width,
|
||||
width: !isUsingScaledDimensions ? width : scaledBoundingBoxDimensions.width,
|
||||
height: !isUsingScaledDimensions
|
||||
? height
|
||||
: scaledBoundingBoxDimensions.height,
|
||||
positive_prompt: '', // set in addDynamicPromptsToGraph
|
||||
negative_prompt: negativePrompt,
|
||||
model,
|
||||
|
@@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip';
export const IMAGE_TO_LATENTS = 'image_to_latents';
export const LATENTS_TO_LATENTS = 'latents_to_latents';
export const RESIZE = 'resize_image';
export const IMG2IMG_RESIZE = 'img2img_resize';
export const CANVAS_OUTPUT = 'canvas_output';
export const INPAINT_IMAGE = 'inpaint_image';
export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image';
@@ -25,6 +26,7 @@ export const INPAINT_IMAGE_RESIZE_DOWN = 'inpaint_image_resize_down';
export const INPAINT_INFILL = 'inpaint_infill';
export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down';
export const INPAINT_FINAL_IMAGE = 'inpaint_final_image';
export const INPAINT_CREATE_MASK = 'inpaint_create_mask';
export const CANVAS_COHERENCE_DENOISE_LATENTS =
  'canvas_coherence_denoise_latents';
export const CANVAS_COHERENCE_NOISE = 'canvas_coherence_noise';
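The two new constants, `IMG2IMG_RESIZE` and `INPAINT_CREATE_MASK`, are node ids used both as keys in `graph.nodes` and as endpoints in edges, which keeps a node and its edges from drifting apart. A minimal sketch of that pattern:

```ts
// The string constants double as graph.nodes keys and edge endpoints.
const IMG2IMG_RESIZE = 'img2img_resize';
const INPAINT_CREATE_MASK = 'inpaint_create_mask';

const nodes: Record<string, { id: string; type: string }> = {
  [IMG2IMG_RESIZE]: { id: IMG2IMG_RESIZE, type: 'img_resize' },
  [INPAINT_CREATE_MASK]: { id: INPAINT_CREATE_MASK, type: 'create_denoise_mask' },
};

console.log(Object.keys(nodes)); // ['img2img_resize', 'inpaint_create_mask']
```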
invokeai/frontend/web/src/services/api/schema.d.ts (vendored, 120 lines changed)
File diff suppressed because one or more lines are too long
@@ -111,6 +111,7 @@ export type ImageBlurInvocation = s['ImageBlurInvocation'];
export type ImageScaleInvocation = s['ImageScaleInvocation'];
export type InfillPatchMatchInvocation = s['InfillPatchMatchInvocation'];
export type InfillTileInvocation = s['InfillTileInvocation'];
export type CreateDenoiseMaskInvocation = s['CreateDenoiseMaskInvocation'];
export type RandomIntInvocation = s['RandomIntInvocation'];
export type CompelInvocation = s['CompelInvocation'];
export type DynamicPromptInvocation = s['DynamicPromptInvocation'];
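The re-exported `CreateDenoiseMaskInvocation` type is what the canvas builders cast to when they attach the init image to the mask node. A hedged sketch of that cast-then-assign pattern (the invocation shape below is a simplified stand-in for the generated OpenAPI type, and the image name is made up):

```ts
// Simplified stand-in for the generated invocation type.
type CreateDenoiseMaskInvocation = {
  id: string;
  type: 'create_denoise_mask';
  image?: { image_name: string };
  fp32?: boolean;
};

const nodes: Record<string, unknown> = {
  inpaint_create_mask: { id: 'inpaint_create_mask', type: 'create_denoise_mask' },
};

// Cast the loosely-typed graph node before setting its image, as the
// builders above do with canvasInitImage.
(nodes['inpaint_create_mask'] as CreateDenoiseMaskInvocation).image = {
  image_name: 'canvas_init_image.png',
};
```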