From 0257b4a611baf312d20a92cf8b5cd5ce10c55c78 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 15 Jul 2023 00:13:45 +1000 Subject: [PATCH 01/12] fix(ui): fix mouse interactions --- .../components/IAINode/IAINodeHeader.tsx | 2 ++ .../components/IAINode/IAINodeInputs.tsx | 27 ++++++++++--------- .../nodes/components/InvocationComponent.tsx | 11 +++++++- .../nodes/components/ProgressImageNode.tsx | 2 ++ .../nodes/hooks/useBuildInvocation.ts | 8 ++++++ 5 files changed, 36 insertions(+), 14 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/IAINode/IAINodeHeader.tsx b/invokeai/frontend/web/src/features/nodes/components/IAINode/IAINodeHeader.tsx index 73705769b6..226aaed7be 100644 --- a/invokeai/frontend/web/src/features/nodes/components/IAINode/IAINodeHeader.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/IAINode/IAINodeHeader.tsx @@ -1,4 +1,5 @@ import { Flex, Heading, Icon, Tooltip } from '@chakra-ui/react'; +import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/hooks/useBuildInvocation'; import { memo } from 'react'; import { FaInfoCircle } from 'react-icons/fa'; @@ -12,6 +13,7 @@ const IAINodeHeader = (props: IAINodeHeaderProps) => { const { nodeId, title, description } = props; return ( { }); return ( - + {IAINodeInputsToRender} ); diff --git a/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx index 12817679e2..3a08b46dde 100644 --- a/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx @@ -23,7 +23,14 @@ export const InvocationComponent = memo((props: NodeProps) => { if (!template) { return ( - + ) => { description={template.description} /> diff --git a/invokeai/frontend/web/src/features/nodes/components/ProgressImageNode.tsx 
b/invokeai/frontend/web/src/features/nodes/components/ProgressImageNode.tsx index 6424d4f76c..2975cd820c 100644 --- a/invokeai/frontend/web/src/features/nodes/components/ProgressImageNode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/ProgressImageNode.tsx @@ -21,7 +21,9 @@ const ProgressImageNode = (props: NodeProps) => { /> nodes.invocationTemplates ); +export const DRAG_HANDLE_CLASSNAME = 'node-drag-handle'; + +export const SHARED_NODE_PROPERTIES: Partial = { + dragHandle: `.${DRAG_HANDLE_CLASSNAME}`, +}; + export const useBuildInvocation = () => { const invocationTemplates = useAppSelector(templatesSelector); @@ -32,6 +38,7 @@ export const useBuildInvocation = () => { }); const node: Node = { + ...SHARED_NODE_PROPERTIES, id: 'progress_image', type: 'progress_image', position: { x: x, y: y }, @@ -91,6 +98,7 @@ export const useBuildInvocation = () => { }); const invocation: Node = { + ...SHARED_NODE_PROPERTIES, id: nodeId, type: 'invocation', position: { x: x, y: y }, From 30e45eaf47c3cced85ed5175c8430c0c0d05d441 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 15 Jul 2023 00:45:26 +1000 Subject: [PATCH 02/12] feat(ui): hold shift to make nodes draggable from anywhere --- .../src/features/nodes/components/InvocationComponent.tsx | 4 ++-- .../web/src/features/nodes/components/NodeWrapper.tsx | 5 +++++ .../web/src/features/nodes/components/ProgressImageNode.tsx | 1 - 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx index 3a08b46dde..608f98d6d2 100644 --- a/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/InvocationComponent.tsx @@ -53,14 +53,14 @@ export const InvocationComponent = memo((props: NodeProps) => { description={template.description} 
/> diff --git a/invokeai/frontend/web/src/features/nodes/components/NodeWrapper.tsx b/invokeai/frontend/web/src/features/nodes/components/NodeWrapper.tsx index 7a76cd5902..dc5a94c267 100644 --- a/invokeai/frontend/web/src/features/nodes/components/NodeWrapper.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/NodeWrapper.tsx @@ -2,6 +2,8 @@ import { Box, useToken } from '@chakra-ui/react'; import { NODE_MIN_WIDTH } from 'app/constants'; import { PropsWithChildren } from 'react'; +import { DRAG_HANDLE_CLASSNAME } from '../hooks/useBuildInvocation'; +import { useAppSelector } from 'app/store/storeHooks'; type NodeWrapperProps = PropsWithChildren & { selected: boolean; @@ -13,8 +15,11 @@ const NodeWrapper = (props: NodeWrapperProps) => { 'dark-lg', ]); + const shift = useAppSelector((state) => state.hotkeys.shift); + return ( ) => { Date: Sat, 15 Jul 2023 01:04:33 +1000 Subject: [PATCH 03/12] fix(ui): allow decimals in number inputs still some jank but eh --- .../src/common/components/IAINumberInput.tsx | 2 +- .../fields/NumberInputFieldComponent.tsx | 36 ++++++++++++++++--- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/src/common/components/IAINumberInput.tsx b/invokeai/frontend/web/src/common/components/IAINumberInput.tsx index 8f675cc148..de3b44564a 100644 --- a/invokeai/frontend/web/src/common/components/IAINumberInput.tsx +++ b/invokeai/frontend/web/src/common/components/IAINumberInput.tsx @@ -28,7 +28,7 @@ import { useState, } from 'react'; -const numberStringRegex = /^-?(0\.)?\.?$/; +export const numberStringRegex = /^-?(0\.)?\.?$/; interface Props extends Omit { label?: string; diff --git a/invokeai/frontend/web/src/features/nodes/components/fields/NumberInputFieldComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/fields/NumberInputFieldComponent.tsx index f5df8989f5..50d69a6496 100644 --- a/invokeai/frontend/web/src/features/nodes/components/fields/NumberInputFieldComponent.tsx +++ 
b/invokeai/frontend/web/src/features/nodes/components/fields/NumberInputFieldComponent.tsx @@ -6,6 +6,7 @@ import { NumberInputStepper, } from '@chakra-ui/react'; import { useAppDispatch } from 'app/store/storeHooks'; +import { numberStringRegex } from 'common/components/IAINumberInput'; import { fieldValueChanged } from 'features/nodes/store/nodesSlice'; import { FloatInputFieldTemplate, @@ -13,7 +14,7 @@ import { IntegerInputFieldTemplate, IntegerInputFieldValue, } from 'features/nodes/types/types'; -import { memo } from 'react'; +import { memo, useEffect, useState } from 'react'; import { FieldComponentProps } from './types'; const NumberInputFieldComponent = ( @@ -23,17 +24,42 @@ const NumberInputFieldComponent = ( > ) => { const { nodeId, field } = props; - const dispatch = useAppDispatch(); + const [valueAsString, setValueAsString] = useState( + String(field.value) + ); - const handleValueChanged = (_: string, value: number) => { - dispatch(fieldValueChanged({ nodeId, fieldName: field.name, value })); + const handleValueChanged = (v: string) => { + setValueAsString(v); + // This allows negatives and decimals e.g. '-123', `.5`, `-0.2`, etc. + if (!v.match(numberStringRegex)) { + // Cast the value to number. Floor it if it should be an integer. + dispatch( + fieldValueChanged({ + nodeId, + fieldName: field.name, + value: + props.template.type === 'integer' + ? 
Math.floor(Number(v)) + : Number(v), + }) + ); + } }; + useEffect(() => { + if ( + !valueAsString.match(numberStringRegex) && + field.value !== Number(valueAsString) + ) { + setValueAsString(String(field.value)); + } + }, [field.value, valueAsString]); + return ( From 19cbda56b6720829eed8d596c41adbdb203e301b Mon Sep 17 00:00:00 2001 From: ymgenesis Date: Tue, 11 Jul 2023 14:50:22 +0200 Subject: [PATCH 04/12] Create NODES.md Introductory nodes documentation --- docs/features/NODES.md | 46 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 docs/features/NODES.md diff --git a/docs/features/NODES.md b/docs/features/NODES.md new file mode 100644 index 0000000000..1632472912 --- /dev/null +++ b/docs/features/NODES.md @@ -0,0 +1,46 @@ +# Nodes Editor + +The nodes editor is a blank canvas where you add modular node windows for image generation. The node processing flow is usually done from left to right, though linearity can become abstracted the more complex the node graph becomes. Nodes are connected via wires/noodles. + +To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onwards like a power bar passes electricity. Not all outputs are compatible with all inputs, however, much like a power bar can’t take in spaghetti noodles instead of electricity. In general, node outputs are colour-coded to match compatible inputs of other nodes. + +## Anatomy of a Node + +Individual nodes are made up of the following: + +- Inputs: Edge points on the left side of the node window where you connect outputs from other nodes. +- Outputs: Edge points on the right side of the node window where you connect to inputs on other nodes. 
+- Options: Various options which are either manually configured, or overridden by connecting an output from another node to the input. + +## Diffusion Overview + +Taking the time to understand the diffusion process will help you to understand how to setup your nodes in the nodes editor. + +There are two main spaces Stable Diffusion works in: image space and latent space. + +Image space represents images in pixel-form that you look at. Latent space represents compressed inputs. It’s in latent space that Stable Diffusion processes images. A VAE (Variational Auto Encoder) is responsible for compressing and encoding inputs into latent space, as well as decoding outputs back into image space. + +When you generate an image using text-to-image, multiple steps occur in latent space: +1. Random noise is generated at the chosen height and width. The noise’s characteristics are dictated by the chosen (or not chosen) seed. This noise tensor is passed into latent space. We’ll call this noise A. +1. Using a model’s U-Net, a noise predictor examines noise A, and the words tokenized by CLIP from your prompt (conditioning). It generates its own noise tensor to predict what the final image might look like in latent space. We’ll call this noise B. +1. Noise B is subtracted from noise A in an attempt to create a final latent image indicative of the inputs. This step is repeated for the number of sampler steps chosen. +1. The VAE decodes the final latent image from latent space into image space. + +image-to-image is a similar process, with only step 1 being different: +1. The input image is decoded from image space into latent space by the VAE. Noise is then added to the input latent image. Denoising Strength dictates how much noise is added, 0 being none, 1 being all-encompassing. We’ll call this noise A. The process is then the same as steps 2-4 in the text-to-image explanation above. 
+ +Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (where noise prediction occurs given a prompt and initial noise tensor). + +A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from the latent image across the sampler steps chosen (step 3 above). Less noise is usually subtracted at higher sampler steps. + +## Basic text-to-image Node Graph + +With our knowledge on the diffusion process, let’s break down a basic text-to-image node graph in the nodes editor: + +nodest2i + +- Model Loader: A necessity to generating images (as we’ve read above). We choose our model from the dropdown. It outputs a U-Net, CLIP tokenizer, and VAE. +- Prompt (Compel): Another necessity. Two prompt nodes are created. One will output positive conditioning (what you want, ‘dog’), one will output negative (what you don’t want, ‘cat’). They both input the CLIP tokenizer that the Model Loader node outputs. +- Noise: Consider this noise A from step one of the text-to-image explanation above. Choose a seed number, width, and height. +- TextToLatents: This node takes many inputs for converting and processing text & noise from image space into latent space, hence the name TextTo**Latents**. In this setup, it inputs positive and negative conditioning from the prompt nodes for processing (step 2 above). It inputs noise from the noise node for processing (steps 2 & 3 above). Lastly, it inputs a U-Net from the Model Loader node for processing (step 2 above). It outputs latents for use in the next LatentsToImage node. Choose number of sampler steps, CFG scale, and scheduler. +- LatentsToImage: This node takes in processed latents from the TextToLatents node, and the model’s VAE from the Model Loader node which is responsible for decoding latents back into the image space, hence the name LatentsTo**Image**. This node is the last stop, and once the image is decoded, it is saved to the gallery. 
From 3454b7654cc57659e82d2b6f7eb3ac114ad07cbe Mon Sep 17 00:00:00 2001 From: ymgenesis Date: Tue, 11 Jul 2023 17:28:17 +0200 Subject: [PATCH 05/12] add i2i and controlnet examples Added examples for img2img and ControlNet --- docs/features/NODES.md | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/docs/features/NODES.md b/docs/features/NODES.md index 1632472912..786a7fb1c1 100644 --- a/docs/features/NODES.md +++ b/docs/features/NODES.md @@ -33,9 +33,15 @@ Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (w A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from the latent image across the sampler steps chosen (step 3 above). Less noise is usually subtracted at higher sampler steps. -## Basic text-to-image Node Graph +## Node Types -With our knowledge on the diffusion process, let’s break down a basic text-to-image node graph in the nodes editor: +_List all nodes with a short explanation of each_ + +## Examples + +With our knowledge on the diffusion process, let’s break down some basic graphs in the nodes editor. Note that a node's options can be overridden by inputs from other nodes. These examples aren't strict rules to follow, and only demonstrate some basic configurations. + +### Basic text-to-image Node Graph nodest2i @@ -44,3 +50,26 @@ With our knowledge on the diffusion process, let’s break down a basic text-to- - Noise: Consider this noise A from step one of the text-to-image explanation above. Choose a seed number, width, and height. - TextToLatents: This node takes many inputs for converting and processing text & noise from image space into latent space, hence the name TextTo**Latents**. In this setup, it inputs positive and negative conditioning from the prompt nodes for processing (step 2 above). It inputs noise from the noise node for processing (steps 2 & 3 above). Lastly, it inputs a U-Net from the Model Loader node for processing (step 2 above). 
It outputs latents for use in the next LatentsToImage node. Choose number of sampler steps, CFG scale, and scheduler. - LatentsToImage: This node takes in processed latents from the TextToLatents node, and the model’s VAE from the Model Loader node which is responsible for decoding latents back into the image space, hence the name LatentsTo**Image**. This node is the last stop, and once the image is decoded, it is saved to the gallery. + +### Basic image-to-image Node Graph + +nodesi2i + +- Model Loader: Choose a model from the dropdown. +- Prompt (Compel): Two prompt nodes. One positive (dog), one negative (dog). Same CLIP inputs from the Model Loader node as before. +- ImageToLatents: Upload a a source image directly in the node window, via drag'n'drop from the gallery, or passed in as input. The ImageToLatents node inputs the VAE from the Model Loader node to decode the chosen image from image space into latent space, hence the name ImageTo**Latents**. It outputs latents for use in the next LatentsToLatents node. It also outputs the source image's width and height for use in the next Noise node if the final image is to be the same dimensions as the source image. +- Noise: A noise tensor is created with the width and height of the source image, and connected to the next LatentsToLatents node. Notice the width and height fields are overridden by the input from the ImageToLatents width and height outputs. +- LatentsToLatents: The inputs and options are nearly identical to TextToLatents, except that LatentsToLatents also takes latents as an input. Considering our source image is already converted to latents in the last ImageToLatents node, and text + noise are no longer the only inputs to process, we use the LatentsToLatents node. +- LatentsToImage: Like previously, the LatentsToImage node will use the VAE from the Model Loader as input to decode the latents from LatentsToLatents into image space, and save it to the gallery. 
+ +### Basic ControlNet Node Graph + +nodescontrol + +- Model Loader +- Prompt (Compel) +- Noise: Width and height of the CannyImageProcessor ControlNet image is passed in to set the dimensions of the noise passed to TextToLatents. +- CannyImageProcessor: The CannyImageProcessor node is used to process the source image being used as a ControlNet. Each ControlNet processor node applies control in different ways, and has some different options to configure. Width and height are passed to noise, as mentioned. The processed ControlNet image is output to the ControlNet node. +- ControlNet: Select the type of control model. In this case, canny is chosen as the CannyImageProcessor was used to generate the ControlNet image. Configure the control node options, and pass the control output to TextToLatents. +- TextToLatents: Similar to the basic text-to-image example, except ControlNet is passed to the control input edge point. +- LatentsToImage From 759ca317d085c2ff301b66c711d098c43d723ca8 Mon Sep 17 00:00:00 2001 From: ymgenesis Date: Tue, 11 Jul 2023 21:36:42 +0200 Subject: [PATCH 06/12] add node types & functions Add a list of node types and their functions. Functions are just copied text from node descriptions in webui. --- docs/features/NODES.md | 69 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/docs/features/NODES.md b/docs/features/NODES.md index 786a7fb1c1..382afb49e8 100644 --- a/docs/features/NODES.md +++ b/docs/features/NODES.md @@ -35,7 +35,74 @@ A noise scheduler (eg. 
DPM++ 2M Karras) schedules the subtraction of noise from ## Node Types -_List all nodes with a short explanation of each_ +| Node | Function | +| ---------------------------------- | --------------------------------------------------------------------------------------| +| Add | Adds two numbers | +| CannyImageProcessor | Canny edge detection for ControlNet | +| ClipSkip | Skip layers in clip text_encoder model | +| Collect | Collects values into a collection | +| Prompt (Compel) | Parse prompt using compel package to conditioning | +| ContentShuffleImageProcessor | Applies content shuffle processing to image | +| ControlNet | Collects ControlNet info to pass to other nodes | +| CvInpaint | Simple inpaint using opencv | +| Divide | Divides two numbers | +| DynamicPrompt | Parses a prompt using adieyal/dynamic prompt's random or combinatorial generator | +| FloatLinearRange | Creates a range | +| HedImageProcessor | Applies HED edge detection to image | +| ImageBlur | Blurs an image | +| ImageChannel | Gets a channel from an image | +| ImageCollection | Load a collection of images and provide it as output | +| ImageConvert | Converts an image to a different mode | +| ImageCrop | Crops an imiage to a specified box. The box can be outside of the image. 
| +| ImageInverseLerp | Inverse linear interpolation of all pixels of an image | +| ImageLerp | Linear interpolation of all pixels of an image | +| ImageMultiply | Multiplies two images together using `PIL.ImageChops.Multiply()` | +| ImagePaste | Pastes an image into another image | +| ImageProcessor | Base class for invocations that reprocess images for ControlNet | +| ImageResize | Resizes an image to specific dimensions | +| ImageScale | Scales an image by a factor | +| ImageToLatents | Scales latents by a given factor | +| InfillColor | Infills transparent areas of an image with a solid color | +| InfillPatchMatch | Infills transparent areas of an image using the PatchMatch algorithm | +| InfillTile | Infills transparents areas of an image with tiles of the image | +| Inpaint | Generates an image using inpaint | +| Iterate | Iterates over a list of items | +| LatentsToImage | Generates an image from latents | +| LatentsToLatents | Generates latents using latents as base image | +| LeresImageProcessor | Applies leres processing to image | +| LineartAnimeImageProcessor | Applies line art anime processing to image | +| LineartImageProcessor | Applies line art processing to image | +| LoadImage | Load an image and provide it as output | +| Lora Loader | Apply selected lora to unet and text_encoder | +| Model Loader | Loads a main model, outputting its submodels | +| MaskFromAlpha | Extracts the alpha channel of an image as a mask | +| MediapipeFaceProcessor | Applies mediapipe face processing to image | +| MidasDepthImageProcessor | Applies Midas depth processing to image | +| MlsdImageProcessor | Applied MLSD processing to image | +| Multiply | Multiplies two numbers | +| Noise | Generates latent noise | +| NormalbaeImageProcessor | Applies NormalBAE processing to image | +| OpenposeImageProcessor | Applies Openpose processing to image | +| ParamFloat | A float parameter | +| ParamInt | An integer parameter | +| PidiImageProcessor | Applies PIDI processing to an 
image | +| Progress Image | Displays the progress image in the Node Editor | +| RandomInit | Outputs a single random integer | +| RandomRange | Creates a collection of random numbers | +| Range | Creates a range of numbers from start to stop with step | +| RangeOfSize | Creates a range from start to start + size with step | +| ResizeLatents | Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. | +| RestoreFace | Restores faces in the image | +| ScaleLatents | Scales latents by a given factor | +| SegmentAnythingProcessor | Applies segment anything processing to image | +| ShowImage | Displays a provided image, and passes it forward in the pipeline | +| StepParamEasing | Experimental per-step parameter for easing for denoising steps | +| Subtract | Subtracts two numbers | +| TextToLatents | Generates latents from conditionings | +| TileResampleProcessor | Bass class for invocations that preprocess images for ControlNet | +| Upscale | Upscales an image | +| VAE Loader | Loads a VAE model, outputting a VaeLoaderOutput | +| ZoeDepthImageProcessor | Applies Zoe depth processing to image | ## Examples From ba783d9466b9c524d7fc43c47239340c09ac59ef Mon Sep 17 00:00:00 2001 From: ymgenesis Date: Thu, 13 Jul 2023 15:17:47 +0200 Subject: [PATCH 07/12] add node grouping concepts & typos Added a section that focuses more on conceptualizing node workflows as groups of nodes working together as a whole. Also, typos. --- docs/features/NODES.md | 76 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 8 deletions(-) diff --git a/docs/features/NODES.md b/docs/features/NODES.md index 382afb49e8..ed6eb89e23 100644 --- a/docs/features/NODES.md +++ b/docs/features/NODES.md @@ -2,7 +2,7 @@ The nodes editor is a blank canvas where you add modular node windows for image generation. The node processing flow is usually done from left to right, though linearity can become abstracted the more complex the node graph becomes. 
Nodes are connected via wires/noodles. -To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onwards like a power bar passes electricity. Not all outputs are compatible with all inputs, however, much like a power bar can’t take in spaghetti noodles instead of electricity. In general, node outputs are colour-coded to match compatible inputs of other nodes. +To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onward like a power bar passes electricity. Not all outputs are compatible with all inputs, however, much like a power bar can’t take in spaghetti noodles instead of electricity. In general, node outputs are colour-coded to match compatible inputs of other nodes. ## Anatomy of a Node @@ -14,11 +14,11 @@ Individual nodes are made up of the following: ## Diffusion Overview -Taking the time to understand the diffusion process will help you to understand how to setup your nodes in the nodes editor. +Taking the time to understand the diffusion process will help you to understand how to set up your nodes in the nodes editor. There are two main spaces Stable Diffusion works in: image space and latent space. -Image space represents images in pixel-form that you look at. Latent space represents compressed inputs. It’s in latent space that Stable Diffusion processes images. 
A VAE (Variational Auto Encoder) is responsible for compressing and encoding inputs into latent space, as well as decoding outputs back into image space. +Image space represents images in pixel form that you look at. Latent space represents compressed inputs. It’s in latent space that Stable Diffusion processes images. A VAE (Variational Auto Encoder) is responsible for compressing and encoding inputs into latent space, as well as decoding outputs back into image space. When you generate an image using text-to-image, multiple steps occur in latent space: 1. Random noise is generated at the chosen height and width. The noise’s characteristics are dictated by the chosen (or not chosen) seed. This noise tensor is passed into latent space. We’ll call this noise A. @@ -27,7 +27,7 @@ When you generate an image using text-to-image, multiple steps occur in latent s 1. The VAE decodes the final latent image from latent space into image space. image-to-image is a similar process, with only step 1 being different: -1. The input image is decoded from image space into latent space by the VAE. Noise is then added to the input latent image. Denoising Strength dictates how much noise is added, 0 being none, 1 being all-encompassing. We’ll call this noise A. The process is then the same as steps 2-4 in the text-to-image explanation above. +1. The input image is decoded from image space into latent space by the VAE. Noise is then added to the input latent image. Denoising Strength dictates how much noise is added, 0 being none, and 1 being all-encompassing. We’ll call this noise A. The process is then the same as steps 2-4 in the text-to-image explanation above. Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (where noise prediction occurs given a prompt and initial noise tensor). @@ -53,7 +53,7 @@ A noise scheduler (eg. 
DPM++ 2M Karras) schedules the subtraction of noise from | ImageChannel | Gets a channel from an image | | ImageCollection | Load a collection of images and provide it as output | | ImageConvert | Converts an image to a different mode | -| ImageCrop | Crops an imiage to a specified box. The box can be outside of the image. | +| ImageCrop | Crops an image to a specified box. The box can be outside of the image. | | ImageInverseLerp | Inverse linear interpolation of all pixels of an image | | ImageLerp | Linear interpolation of all pixels of an image | | ImageMultiply | Multiplies two images together using `PIL.ImageChops.Multiply()` | @@ -64,7 +64,7 @@ A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from | ImageToLatents | Scales latents by a given factor | | InfillColor | Infills transparent areas of an image with a solid color | | InfillPatchMatch | Infills transparent areas of an image using the PatchMatch algorithm | -| InfillTile | Infills transparents areas of an image with tiles of the image | +| InfillTile | Infills transparent areas of an image with tiles of the image | | Inpaint | Generates an image using inpaint | | Iterate | Iterates over a list of items | | LatentsToImage | Generates an image from latents | @@ -104,9 +104,69 @@ A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from | VAE Loader | Loads a VAE model, outputting a VaeLoaderOutput | | ZoeDepthImageProcessor | Applies Zoe depth processing to image | +## Node Grouping Concepts + +There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see [Examples](https://github.com/ymgenesis/InvokeAI/edit/main/docs/features/NODES.md#examples)). 
+ +### Noise + +As described, an initial noise tensor is necessary for the latent diffusion process. As a result, all non-image *ToLatents nodes require a noise node input. + +groupsnoise + +### Conditioning + +As described, conditioning is necessary for the latent diffusion process, whether empty or not. As a result, all non-image *ToLatents nodes require positive and negative conditioning inputs. Conditioning is reliant on a CLIP tokenizer provided by the Model Loader node. + +groupsconditioning + +### Image Space & VAE + +The ImageToLatents node doesn't require a noise node input, but requires a VAE input to convert the image from image space into latent space. In reverse, the LatentsToImage node requires a VAE input to convert from latent space back into image space. + +groupsimgvae + +### Defined & Random Seeds + +It is common to want to use both the same seed (for continuity) and random seeds (for variance). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed. + +groupsrandseed + +### Control + +Control means to guide the diffusion process to adhere to a defined input or structure. Control can be provided as input to non-image *ToLatents nodes from ControlNet nodes. ControlNet nodes usually require an image processor which converts an input image for use with ControlNet. + +groupscontrol + +### LoRA + +The Lora Loader node lets you load a LoRA (say that ten times fast) and pass it as output to both the Prompt (Compel) and non-image *ToLatents nodes. A model's CLIP tokenizer is passed through the LoRA into Prompt (Compel), where it affects conditioning. A model's U-Net is also passed through the LoRA into a non-image *ToLatents node, where it affects noise prediction. 
+ +groupslora + +### Scaling + +Use the ImageScale, ScaleLatents, and Upscale nodes to upscale images and/or latent images. The chosen method differs across contexts. However, be aware that latents are already noisy and compressed at their original resolution; scaling an image could produce more detailed results. + +groupsallscale + +### Iteration + Multiple Images as Input + +Iteration is a common concept in any processing, and means to repeat a process with given input. In nodes, you're able to use the Iterate node to iterate through collections usually gathered by the Collect node. The Iterate node has many potential uses, from processing a collection of images one after another, to varying seeds across multiple image generations and more. This screenshot demonstrates how to collect several images and pass them out one at a time. + +groupsiterate + +### Multiple Image Generation + Random Seeds + +Multiple image generation in the node editor is done using the RandomRange node. In this case, the 'Size' field represents the number of images to generate. As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection. + +To control seeds across generations takes some care. The first row in the screenshot will generate multiple images with different seeds, but using the same RandomRange parameters across invocations will result in the same group of random seeds being used across the images, producing repeatable results. In the second row, adding the RandomInt node as input to RandomRange's 'Seed' edge point will ensure that seeds are varied across all images across invocations, producing varied results. + +groupsmultigenseeding + ## Examples -With our knowledge on the diffusion process, let’s break down some basic graphs in the nodes editor. Note that a node's options can be overridden by inputs from other nodes. These examples aren't strict rules to follow, and only demonstrate some basic configurations. 
+With our knowledge of node grouping and the diffusion process, let’s break down some basic graphs in the nodes editor. Note that a node's options can be overridden by inputs from other nodes. These examples aren't strict rules to follow and only demonstrate some basic configurations. ### Basic text-to-image Node Graph @@ -124,7 +184,7 @@ With our knowledge on the diffusion process, let’s break down some basic graph - Model Loader: Choose a model from the dropdown. - Prompt (Compel): Two prompt nodes. One positive (dog), one negative (dog). Same CLIP inputs from the Model Loader node as before. -- ImageToLatents: Upload a a source image directly in the node window, via drag'n'drop from the gallery, or passed in as input. The ImageToLatents node inputs the VAE from the Model Loader node to decode the chosen image from image space into latent space, hence the name ImageTo**Latents**. It outputs latents for use in the next LatentsToLatents node. It also outputs the source image's width and height for use in the next Noise node if the final image is to be the same dimensions as the source image. +- ImageToLatents: Upload a source image directly in the node window, via drag'n'drop from the gallery, or passed in as input. The ImageToLatents node inputs the VAE from the Model Loader node to decode the chosen image from image space into latent space, hence the name ImageTo**Latents**. It outputs latents for use in the next LatentsToLatents node. It also outputs the source image's width and height for use in the next Noise node if the final image is to be the same dimensions as the source image. - Noise: A noise tensor is created with the width and height of the source image, and connected to the next LatentsToLatents node. Notice the width and height fields are overridden by the input from the ImageToLatents width and height outputs. 
- LatentsToLatents: The inputs and options are nearly identical to TextToLatents, except that LatentsToLatents also takes latents as an input. Considering our source image is already converted to latents in the last ImageToLatents node, and text + noise are no longer the only inputs to process, we use the LatentsToLatents node. - LatentsToImage: Like previously, the LatentsToImage node will use the VAE from the Model Loader as input to decode the latents from LatentsToLatents into image space, and save it to the gallery. From 44662c0c0ee6c671a12bca4b5eca12e3133715ac Mon Sep 17 00:00:00 2001 From: ymgenesis Date: Thu, 13 Jul 2023 15:23:10 +0200 Subject: [PATCH 08/12] remove incorrect hyperlink --- docs/features/NODES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/features/NODES.md b/docs/features/NODES.md index ed6eb89e23..2e27d1a227 100644 --- a/docs/features/NODES.md +++ b/docs/features/NODES.md @@ -106,7 +106,7 @@ A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from ## Node Grouping Concepts -There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see [Examples](https://github.com/ymgenesis/InvokeAI/edit/main/docs/features/NODES.md#examples)). +There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples). 
### Noise From 565299c7a1048cc058987c0bdc180e403a52160b Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> Date: Fri, 14 Jul 2023 16:09:47 -0400 Subject: [PATCH 09/12] Minor Updates to NODES.md --- docs/features/NODES.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/features/NODES.md b/docs/features/NODES.md index 2e27d1a227..1cd788b8aa 100644 --- a/docs/features/NODES.md +++ b/docs/features/NODES.md @@ -1,8 +1,8 @@ -# Nodes Editor +# Nodes Editor (Experimental Beta) -The nodes editor is a blank canvas where you add modular node windows for image generation. The node processing flow is usually done from left to right, though linearity can become abstracted the more complex the node graph becomes. Nodes are connected via wires/noodles. +The nodes editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. The node processing flow is usually done from left (inputs) to right (outputs), though linearity can become abstracted the more complex the node graph becomes. Node inputs and outputs are connected by dragging connectors from node to node. -To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onward like a power bar passes electricity. Not all outputs are compatible with all inputs, however, much like a power bar can’t take in spaghetti noodles instead of electricity. In general, node outputs are colour-coded to match compatible inputs of other nodes. +To better understand how nodes are used, think of how an electric power bar works.
It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onward like a power bar passes electricity. Not all outputs are compatible with all inputs, however - Each node has different constraints on how it is expecting to input/output information. In general, node outputs are colour-coded to match compatible inputs of other nodes. ## Anatomy of a Node @@ -33,7 +33,7 @@ Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (w A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from the latent image across the sampler steps chosen (step 3 above). Less noise is usually subtracted at higher sampler steps. -## Node Types +## Node Types (Base Nodes) | Node | Function | | ---------------------------------- | --------------------------------------------------------------------------------------| From 7093e5d033726582730cb017efff2e9d968993c9 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sat, 15 Jul 2023 00:52:54 +0300 Subject: [PATCH 10/12] Pad conditionings using zeros and encoder_attention_mask --- invokeai/app/invocations/compel.py | 5 +-- .../diffusion/shared_invokeai_diffusion.py | 36 ++++++++++++++++++- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 303e0a0c84..a5a9701149 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -100,7 +100,7 @@ class CompelInvocation(BaseInvocation): text_encoder=text_encoder, textual_inversion_manager=ti_manager, dtype_for_device_getter=torch_dtype, - truncate_long_prompts=True, # TODO: + truncate_long_prompts=False, ) conjunction = Compel.parse_prompt_string(self.prompt) @@ -112,9 +112,6 @@ class CompelInvocation(BaseInvocation): c, options = 
compel.build_conditioning_tensor_for_prompt_object( prompt) - # TODO: long prompt support - # if not self.truncate_long_prompts: - # [c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc]) ec = InvokeAIDiffuserComponent.ExtraConditioningInfo( tokens_count_including_eos_bos=get_max_token_count( tokenizer, conjunction), diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 1175475bba..307e949ef8 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -241,11 +241,45 @@ class InvokeAIDiffuserComponent: def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs): # fast batched path + + def _pad_conditioning(cond, target_len, encoder_attention_mask): + conditioning_attention_mask = torch.ones((cond.shape[0], cond.shape[1]), device=cond.device, dtype=cond.dtype) + + if cond.shape[1] < max_len: + conditioning_attention_mask = torch.cat([ + conditioning_attention_mask, + torch.zeros((cond.shape[0], max_len - cond.shape[1]), device=cond.device, dtype=cond.dtype), + ], dim=1) + + cond = torch.cat([ + cond, + torch.zeros((cond.shape[0], max_len - cond.shape[1], cond.shape[2]), device=cond.device, dtype=cond.dtype), + ], dim=1) + + if encoder_attention_mask is None: + encoder_attention_mask = conditioning_attention_mask + else: + encoder_attention_mask = torch.cat([ + encoder_attention_mask, + conditioning_attention_mask, + ]) + + return cond, encoder_attention_mask + x_twice = torch.cat([x] * 2) sigma_twice = torch.cat([sigma] * 2) + + encoder_attention_mask = None + if unconditioning.shape[1] != conditioning.shape[1]: + max_len = max(unconditioning.shape[1], conditioning.shape[1]) + unconditioning, encoder_attention_mask = _pad_conditioning(unconditioning, max_len, encoder_attention_mask) + conditioning, 
encoder_attention_mask = _pad_conditioning(conditioning, max_len, encoder_attention_mask) + both_conditionings = torch.cat([unconditioning, conditioning]) both_results = self.model_forward_callback( - x_twice, sigma_twice, both_conditionings, **kwargs, + x_twice, sigma_twice, both_conditionings, + encoder_attention_mask=encoder_attention_mask, + **kwargs, ) unconditioned_next_x, conditioned_next_x = both_results.chunk(2) return unconditioned_next_x, conditioned_next_x From 8cb19578c27bdf2cea0b87952623d4c2585c1256 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 15 Jul 2023 11:07:13 +1000 Subject: [PATCH 11/12] fix(ui): fix crash on LoRA remove / weight change --- invokeai/frontend/web/src/features/lora/store/loraSlice.ts | 3 ++- .../src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/lora/store/loraSlice.ts b/invokeai/frontend/web/src/features/lora/store/loraSlice.ts index 2dc739a737..f0067a85a2 100644 --- a/invokeai/frontend/web/src/features/lora/store/loraSlice.ts +++ b/invokeai/frontend/web/src/features/lora/store/loraSlice.ts @@ -3,6 +3,7 @@ import { LoRAModelParam } from 'features/parameters/types/parameterSchemas'; import { LoRAModelConfigEntity } from 'services/api/endpoints/models'; export type LoRA = LoRAModelParam & { + id: string; weight: number; }; @@ -24,7 +25,7 @@ export const loraSlice = createSlice({ reducers: { loraAdded: (state, action: PayloadAction) => { const { model_name, id, base_model } = action.payload; - state.loras[id] = { model_name, base_model, ...defaultLoRAConfig }; + state.loras[id] = { id, model_name, base_model, ...defaultLoRAConfig }; }, loraRemoved: (state, action: PayloadAction) => { const id = action.payload; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts index 5d1f3d05d2..a2cf1477f2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLoRAsToGraph.ts @@ -60,7 +60,7 @@ export const addLoRAsToGraph = ( const loraLoaderNode: LoraLoaderInvocation = { type: 'lora_loader', id: currentLoraNodeId, - lora, + lora: { model_name, base_model }, weight, }; From 194434dbfa23706270a5a637c790918887a1e124 Mon Sep 17 00:00:00 2001 From: Mary Hipp Date: Fri, 14 Jul 2023 13:14:41 -0400 Subject: [PATCH 12/12] restore scrollbar --- .../components/ImageGrid/GalleryImageGrid.tsx | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx index 858eeedaa3..8b44b39ae9 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx @@ -118,6 +118,20 @@ const GalleryImageGrid = () => { ); }, [dispatch, imageNames.length, galleryView]); + useEffect(() => { + // Set up gallery scroller + const { current: root } = rootRef; + if (scroller && root) { + initialize({ + target: root, + elements: { + viewport: scroller, + }, + }); + } + return () => osInstance()?.destroy(); + }, [scroller, initialize, osInstance]); + const handleEndReached = useMemo(() => { if (areMoreAvailable) { return handleLoadMoreImages;