Compare commits

..

3 Commits

Author SHA1 Message Date
00becf83d3 Blackified 2023-10-22 00:09:58 -04:00
412fbe592e Added gradient node to image.py 2023-10-22 00:08:07 -04:00
1e59645882 Add negative IP Adapter support 2023-10-20 23:01:13 -04:00
396 changed files with 9637 additions and 18689 deletions

.gitattributes vendored (1 changed line)

@ -2,4 +2,3 @@
# Only affects text files and ignores other file types.
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
* text=auto
-docker/** text eol=lf

.github/workflows/pyflakes.yml vendored (new file, 20 lines)

@ -0,0 +1,20 @@
on:
pull_request:
push:
branches:
- main
- development
- 'release-candidate-*'
jobs:
pyflakes:
name: runner / pyflakes
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: pyflakes
uses: reviewdog/action-pyflakes@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-review


@ -6,7 +6,7 @@ on:
branches: main
jobs:
-ruff:
+black:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
@ -18,7 +18,8 @@ jobs:
- name: Install dependencies with pip
run: |
-pip install ruff
-- run: ruff check --output-format=github .
-- run: ruff format --check .
+pip install black flake8 Flake8-pyproject isort
+- run: isort --check-only .
+- run: black --check .
+- run: flake8


@ -161,7 +161,7 @@ the command `npm install -g yarn` if needed)
_For Windows/Linux with an NVIDIA GPU:_
```terminal
-pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
+pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
```
_For Linux with an AMD GPU:_
@ -175,7 +175,7 @@ the command `npm install -g yarn` if needed)
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
-_For Macintoshes, either Intel or M1/M2/M3:_
+_For Macintoshes, either Intel or M1/M2:_
```sh
pip install InvokeAI --use-pep517


@ -11,5 +11,5 @@ INVOKEAI_ROOT=
# HUGGING_FACE_HUB_TOKEN=
## optional variables specific to the docker setup.
-# GPU_DRIVER=cuda # or rocm
+# GPU_DRIVER=cuda
# CONTAINER_UID=1000


@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.1.0
-ARG TORCHVISION_VERSION=0.16
+ARG TORCH_VERSION=2.0.1
+ARG TORCHVISION_VERSION=0.15.2
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \ elif [ "$GPU_DRIVER" = "rocm" ]; then \
extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
else \ else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
fi &&\ fi &&\


@ -15,10 +15,6 @@ services:
- driver: nvidia
count: 1
capabilities: [gpu]
-# For AMD support, comment out the deploy section above and uncomment the devices section below:
-#devices:
-# - /dev/kfd:/dev/kfd
-# - /dev/dri:/dev/dri
build:
context: ..
dockerfile: docker/Dockerfile


@ -7,5 +7,5 @@ set -e
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1
-docker compose up -d
+docker compose up --build -d
docker compose logs -f


@ -1,6 +1,6 @@
# Nodes
-# Nodes
-Features in InvokeAI are added in the form of modular nodes systems called
+# Invocations
+Features in InvokeAI are added in the form of modular node-like systems called
**Invocations**.
An Invocation is simply a single operation that takes in some inputs and gives
@ -9,34 +9,13 @@ complex functionality.
## Invocations Directory
-InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These can be used as examples to create your own nodes.
-New nodes should be added to a subfolder in `nodes` direction found at the root level of the InvokeAI installation location. Nodes added to this folder will be able to be used upon application startup.
+InvokeAI Invocations can be found in the `invokeai/app/invocations` directory.
+You can add your new functionality to one of the existing Invocations in this
directory or create a new file in this directory as per your needs.
Example `nodes` subfolder structure:
```py
├── __init__.py # Invoke-managed custom node loader
├── cool_node
├── __init__.py # see example below
└── cool_node.py
└── my_node_pack
├── __init__.py # see example below
├── tasty_node.py
├── bodacious_node.py
├── utils.py
└── extra_nodes
└── fancy_node.py
```
Each node folder must have an `__init__.py` file that imports its nodes. Only nodes imported in the `__init__.py` file are loaded.
See the README in the nodes folder for more examples:
```py
from .cool_node import CoolInvocation
```
**Note:** _All Invocations must be inside this directory for InvokeAI to
recognize them as valid Invocations._
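For illustration only (not part of this diff), installing the example `cool_node` pack from the removed lines above might look like the following sketch; the `~/invokeai` root path is an assumption and should be replaced with your actual install location:

```bash
# Sketch: install the example custom node pack described in the docs above.
# ~/invokeai is assumed to be the InvokeAI root; adjust to your install location.
mkdir -p ~/invokeai/nodes/cool_node
cp cool_node.py ~/invokeai/nodes/cool_node/
# Only nodes imported in the folder's __init__.py are loaded at startup:
echo "from .cool_node import CoolInvocation" > ~/invokeai/nodes/cool_node/__init__.py
```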
## Creating A New Invocation

File diff suppressed because it is too large


@ -1,3 +1,12 @@
---
title: Textual Inversion Embeddings and LoRAs
---
# :material-library-shelves: Textual Inversions and LoRAs
With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.
## Using Textual Inversion Files
Textual inversion (TI) files are small models that customize the output of
@ -52,4 +61,29 @@ files it finds there for compatible models. At startup you will see a message si
>> Current embedding manager terms: <HOI4-Leader>, <princess-knight> >> Current embedding manager terms: <HOI4-Leader>, <princess-knight>
``` ```
To use these when generating, simply type the `<` key in your prompt to open the Textual Inversion WebUI and To use these when generating, simply type the `<` key in your prompt to open the Textual Inversion WebUI and
select the embedding you'd like to use. This UI has type-ahead support, so you can easily find supported embeddings. select the embedding you'd like to use. This UI has type-ahead support, so you can easily find supported embeddings.
## Using LoRAs
LoRA files are models that customize the output of Stable Diffusion
image generation. Larger than embeddings, but much smaller than full
models, they augment SD with improved understanding of subjects and
artistic styles.
Unlike TI files, LoRAs do not introduce novel vocabulary into the
model's known tokens. Instead, LoRAs augment the model's weights that
are applied to generate imagery. LoRAs may be supplied with a
"trigger" word that they have been explicitly trained on, or may
simply apply their effect without being triggered.
LoRAs are typically stored in .safetensors files, which are the most
secure way to store and transmit these types of weights. You may
install any number of `.safetensors` LoRA files simply by copying them
into the `autoimport/lora` directory of the corresponding InvokeAI models
directory (usually `invokeai` in your home directory).
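As a hedged illustration of the copy step described above (the LoRA file name is a placeholder, and `~/invokeai` assumes the default root location):

```bash
# Copy a downloaded LoRA into the autoimport folder so it is picked up at startup.
# "my_style_lora.safetensors" is a placeholder; ~/invokeai is the assumed root.
cp my_style_lora.safetensors ~/invokeai/autoimport/lora/
```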
To use these when generating, open the LoRA menu item in the options
panel, select the LoRAs you want to apply and ensure that they have
the appropriate weight recommended by the model provider. Typically,
most LoRAs perform best at a weight of .75-1.


@ -150,6 +150,7 @@ Start/End - 0 represents the start of the generation, 1 represents the end. The
Additionally, each section can be expanded with the "Show Advanced" button in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it in during the generation process.
**Note:** T2I-Adapter models and ControlNet models cannot currently be used together.
## IP-Adapter ## IP-Adapter


@ -1,53 +0,0 @@
---
title: LoRAs & LCM-LoRAs
---
# :material-library-shelves: LoRAs & LCM-LoRAs
With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.
## LoRAs
Low-Rank Adaptation (LoRA) files are models that customize the output of Stable Diffusion
image generation. Larger than embeddings, but much smaller than full
models, they augment SD with improved understanding of subjects and
artistic styles.
Unlike TI files, LoRAs do not introduce novel vocabulary into the
model's known tokens. Instead, LoRAs augment the model's weights that
are applied to generate imagery. LoRAs may be supplied with a
"trigger" word that they have been explicitly trained on, or may
simply apply their effect without being triggered.
LoRAs are typically stored in .safetensors files, which are the most
secure way to store and transmit these types of weights. You may
install any number of `.safetensors` LoRA files simply by copying them
into the `autoimport/lora` directory of the corresponding InvokeAI models
directory (usually `invokeai` in your home directory).
To use these when generating, open the LoRA menu item in the options
panel, select the LoRAs you want to apply and ensure that they have
the appropriate weight recommended by the model provider. Typically,
most LoRAs perform best at a weight of .75-1.
## LCM-LoRAs
Latent Consistency Models (LCMs) allowed a reduced number of steps to be used to generate images with Stable Diffusion. These are created by distilling base models, creating models that only require a small number of steps to generate images. However, LCMs require that any fine-tune of a base model be distilled to be used as an LCM.
LCM-LoRAs are models that provide the benefit of LCMs but are able to be used as LoRAs and applied to any fine tune of a base model. LCM-LoRAs are created by training a small number of adapters, rather than distilling the entire fine-tuned base model. The resulting LoRA can be used the same way as a standard LoRA, but with a greatly reduced step count. This enables SDXL images to be generated up to 10x faster than without the use of LCM-LoRAs.
**Using LCM-LoRAs**
LCM-LoRAs are natively supported in InvokeAI throughout the application. To get started, install any diffusers format LCM-LoRAs using the model manager and select it in the LoRA field.
There are a number parameter differences when using LCM-LoRAs and standard generation:
- When using LCM-LoRAs, the LoRA strength should be lower than if using a standard LoRA, with 0.35 recommended as a starting point.
- The LCM scheduler should be used for generation
- CFG-Scale should be reduced to ~1
- Steps should be reduced in the range of 4-8
Standard LoRAs can also be used alongside LCM-LoRAs, but will also require a lower strength, with 0.45 being recommended as a starting point.
More information can be found here: https://huggingface.co/blog/lcm_lora#fast-inference-with-sdxl-lcm-loras


@ -20,7 +20,7 @@ a single convenient digital artist-optimized user interface.
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
-### * The [LoRA, LyCORIS, LCM-LoRA Models](CONCEPTS.md)
+### * The [LoRA, LyCORIS and Textual Inversion Models](CONCEPTS.md)
Add custom subjects and styles using a variety of fine-tuned models.
### * [ControlNet](CONTROLNET.md)
@ -40,7 +40,7 @@ guide also covers optimizing models to load quickly.
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
-### * [Textual Inversion](TEXTUAL_INVERSIONS.md)
+### * [Textual Inversion](TRAINING.md)
Personalize models by adding your own style or subjects.
## Other Features


@ -1,43 +0,0 @@
# FAQs
**Where do I get started? How can I install Invoke?**
- You can download the latest installers [here](https://github.com/invoke-ai/InvokeAI/releases) - Note that any releases marked as *pre-release* are in a beta state. You may experience some issues, but we appreciate your help testing those! For stable/reliable installations, please install the **[Latest Release](https://github.com/invoke-ai/InvokeAI/releases/latest)**
**How can I download models? Can I use models I already have downloaded?**
- Models can be downloaded through the model manager, or through option [4] in the invoke.bat/invoke.sh launcher script. To download a model through the Model Manager, use the HuggingFace Repo ID by pressing the “Copy” button next to the repository name. Alternatively, to download a model from CivitAi, use the download link in the Model Manager.
- Models that are already downloaded can be used by creating a symlink to the model location in the `autoimport` folder or by using the Model Mangers “Scan for Models” function.
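A sketch of the symlink approach mentioned above; the source path is a placeholder, and the exact `autoimport` subfolder to use depends on the model type:

```bash
# Link an already-downloaded checkpoint into the autoimport folder, then let
# "Scan for Models" (or the next startup scan) register it. Paths are placeholders.
ln -s /path/to/downloaded/model.safetensors ~/invokeai/autoimport/main/
```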
**My images are taking a long time to generate. How can I speed up generation?**
- A common solution is to reduce the size of your RAM & VRAM cache to 0.25. This ensures your system has enough memory to generate images.
- Additionally, check the [hardware requirements](https://invoke-ai.github.io/InvokeAI/#hardware-requirements) to ensure that your system is capable of generating images.
- Lastly, double check your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup.
**I've installed Python on Windows but the installer says it can't find it?**
- Then ensure that you checked **'Add python.exe to PATH'** when installing Python. This can be found at the bottom of the Python Installer window. If you already have Python installed, this can be done with the modify / repair feature of the installer.
**I've installed everything successfully but I still get an error about Triton when starting Invoke?**
- This can be safely ignored. InvokeAI doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
**I updated to 3.4.0 and now xFormers can't load C++/CUDA?**
- An issue occurred with your PyTorch update. Follow these steps to fix :
1. Launch your invoke.bat / invoke.sh and select the option to open the developer console
2. Run:`pip install ".[xformers]" --upgrade --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121`
- If you run into an error with `typing_extensions`, re-open the developer console and run: `pip install -U typing-extensions`
**It says my pip is out of date - is that why my install isn't working?**
- An out of date won't cause an installation to fail. The cause of the error can likely be found above the message that says pip is out of date.
- If you saw that warning but the install went well, don't worry about it (but you can update pip afterwards if you'd like).
**How can I generate the exact same that I found on the internet?**
Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.
**Where can I get more help?**
- Create an issue on [GitHub](https://github.com/invoke-ai/InvokeAI/issues) or post in the [#help channel](https://discord.com/channels/1020123559063990373/1149510134058471514) of the InvokeAI Discord


@ -101,13 +101,16 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div> <div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
!!! Note
This project is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates as it will help aid response time.
## :octicons-link-24: Quick Links
<div class="button-container">
<a href="installation/INSTALLATION"> <button class="button">Installation</button> </a>
<a href="features/"> <button class="button">Features</button> </a>
<a href="help/gettingStartedWithAI/"> <button class="button">Getting Started</button> </a>
<a href="help/FAQ/"> <button class="button">FAQ</button> </a>
<a href="contributing/CONTRIBUTING/"> <button class="button">Contributing</button> </a> <a href="contributing/CONTRIBUTING/"> <button class="button">Contributing</button> </a>
<a href="https://github.com/invoke-ai/InvokeAI/"> <button class="button">Code and Downloads</button> </a> <a href="https://github.com/invoke-ai/InvokeAI/"> <button class="button">Code and Downloads</button> </a>
<a href="https://github.com/invoke-ai/InvokeAI/issues"> <button class="button">Bug Reports </button> </a> <a href="https://github.com/invoke-ai/InvokeAI/issues"> <button class="button">Bug Reports </button> </a>
@ -195,7 +198,6 @@ The list of schedulers has been completely revamped and brought up to date:
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise scnedule |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
| **unipc** | UniPCMultistepScheduler | CPU only |
-| **lcm** | LCMScheduler | |
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.


@ -179,7 +179,7 @@ experimental versions later.
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
or CPU (no graphics acceleration). On Windows, you'll have the
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
-you select CPU on M1/M2/M3 Macintoshes, you will get MPS-based
+you select CPU on M1 or M2 Macintoshes, you will get MPS-based
graphics acceleration without installing additional drivers. If you
are unsure what GPU you are using, you can ask the installer to
guess.
@ -471,7 +471,7 @@ Then type the following commands:
=== "NVIDIA System" === "NVIDIA System"
```bash ```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121 pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu118
pip install xformers pip install xformers
``` ```


@ -148,7 +148,7 @@ manager, please follow these steps:
=== "CUDA (NVidia)" === "CUDA (NVidia)"
```bash ```bash
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121 pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
``` ```
=== "ROCm (AMD)" === "ROCm (AMD)"
@ -327,7 +327,7 @@ installation protocol (important!)
=== "CUDA (NVidia)" === "CUDA (NVidia)"
```bash ```bash
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121 pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
``` ```
=== "ROCm (AMD)" === "ROCm (AMD)"
@ -375,7 +375,7 @@ you can do so using this unsupported recipe:
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
-pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
+pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```


@ -85,7 +85,7 @@ You can find which version you should download from [this link](https://docs.nvi
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
-https://download.pytorch.org/whl/cu121` as described in the [Manual
+https://download.pytorch.org/whl/cu118` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
## :simple-amd: ROCm


@ -30,7 +30,7 @@ methodology for details on why running applications in such a stateless fashion
The container is configured for CUDA by default, but can be built to support AMD GPUs
by setting the `GPU_DRIVER=rocm` environment variable at Docker image build time.
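As a rough sketch of that build-time setting (the project's own build script or compose file may wire this differently; the image tag here is arbitrary):

```bash
# Build the image against ROCm instead of the default CUDA wheels, from the repo root.
# GPU_DRIVER is the ARG declared in docker/Dockerfile.
docker build --build-arg GPU_DRIVER=rocm -t invokeai:rocm -f docker/Dockerfile .
```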
-Developers on Apple silicon (M1/M2/M3): You
+Developers on Apple silicon (M1/M2): You
[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)
and performance is reduced compared with running it directly on macOS but for
development purposes it's fine. Once you're done with development tasks on your
@ -99,14 +99,3 @@ If using an AMD GPU:
Use the standard `docker compose up` command, and generally the `docker compose` [CLI](https://docs.docker.com/compose/reference/) as usual.
Once the container starts up (and configures the InvokeAI root directory if this is a new installation), you can access InvokeAI at [http://localhost:9090](http://localhost:9090)
## Troubleshooting / FAQ
- Q: I am running on Windows under WSL2, and am seeing a "no such file or directory" error.
- A: Your `docker-entrypoint.sh` file likely has Windows (CRLF) as opposed to Unix (LF) line endings,
and you may have cloned this repository before the issue was fixed. To solve this, please change
the line endings in the `docker-entrypoint.sh` file to `LF`. You can do this in VSCode
(`Ctrl+P` and search for "line endings"), or by using the `dos2unix` utility in WSL.
Finally, you may delete `docker-entrypoint.sh` followed by `git pull; git checkout docker/docker-entrypoint.sh`
to reset the file to its most recent version.
For more information on this issue, please see the [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers)
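A sketch of the `dos2unix` fix described in the removed FAQ entry above, run from the repository checkout inside WSL:

```bash
# Convert the entrypoint script back to Unix (LF) line endings...
dos2unix docker/docker-entrypoint.sh
# ...or restore a pristine copy of the file from git instead:
git checkout -- docker/docker-entrypoint.sh
```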


@ -28,7 +28,7 @@ command line, then just be sure to activate it's virtual environment.
Then run the following three commands:
```sh
-pip install xformers~=0.0.22
+pip install xformers~=0.0.19
pip install triton # WON'T WORK ON WINDOWS
python -m xformers.info output
```
@ -42,7 +42,7 @@ If all goes well, you'll see a report like the
following:
```sh
-xFormers 0.0.22
+xFormers 0.0.20
memory_efficient_attention.cutlassF: available
memory_efficient_attention.cutlassB: available
memory_efficient_attention.flshattF: available
@ -59,14 +59,14 @@ swiglu.gemm_fused_operand_sum: available
swiglu.fused.p.cpp: available
is_triton_available: True
is_functorch_available: False
-pytorch.version: 2.1.0+cu121
+pytorch.version: 2.0.1+cu118
pytorch.cuda: available
gpu.compute_capability: 8.9
gpu.name: NVIDIA GeForce RTX 4070
build.info: available
build.cuda_version: 1108
build.python_version: 3.10.11
-build.torch_version: 2.1.0+cu121
+build.torch_version: 2.0.1+cu118
build.env.TORCH_CUDA_ARCH_LIST: 5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6
build.env.XFORMERS_BUILD_TYPE: Release
build.env.XFORMERS_ENABLE_DEBUG_ASSERTIONS: None
@ -92,22 +92,33 @@ installed from source. These instructions were written for a system
running Ubuntu 22.04, but other Linux distributions should be able to
adapt this recipe.
-#### 1. Install CUDA Toolkit 12.1
+#### 1. Install CUDA Toolkit 11.8
You will need the CUDA developer's toolkit in order to compile and
install xFormers. **Do not try to install Ubuntu's nvidia-cuda-toolkit
package.** It is out of date and will cause conflicts among the NVIDIA
driver and binaries. Instead install the CUDA Toolkit package provided
-by NVIDIA itself. Go to [CUDA Toolkit 12.1
-Downloads](https://developer.nvidia.com/cuda-12-1-0-download-archive)
+by NVIDIA itself. Go to [CUDA Toolkit 11.8
+Downloads](https://developer.nvidia.com/cuda-11-8-0-download-archive)
and use the target selection wizard to choose your platform and Linux
distribution. Select an installer type of "runfile (local)" at the
last step.
This will provide you with a recipe for downloading and running a
-install shell script that will install the toolkit and drivers.
+install shell script that will install the toolkit and drivers. For
example, the install script recipe for Ubuntu 22.04 running on a
x86_64 system is:
-#### 2. Confirm/Install pyTorch 2.1.0 with CUDA 12.1 support
```
wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
sudo sh cuda_11.8.0_520.61.05_linux.run
```
Rather than cut-and-paste this example, We recommend that you walk
through the toolkit wizard in order to get the most up to date
installer for your system.
#### 2. Confirm/Install pyTorch 2.01 with CUDA 11.8 support
If you are using InvokeAI 3.0.2 or higher, these will already be
installed. If not, you can check whether you have the needed libraries
@ -122,7 +133,7 @@ Then run the command:
python -c 'exec("import torch\nprint(torch.__version__)")'
```
-If it prints __2.1.0+cu121__ you're good. If not, you can install the
+If it prints __1.13.1+cu118__ you're good. If not, you can install the
most up to date libraries with this command:
```sh


@ -4,16 +4,11 @@ These are nodes that have been developed by the community, for the community. If
If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md).
-To use a node, add the node to the `nodes` folder found in your InvokeAI install location.
+To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations` folder in your Invoke AI install location. If you used the automated installation, this can be found inside the `.venv` folder. Along with the node, an example node graph should be provided to help you get started with the node.
The suggested method is to use `git clone` to clone the repository the node is found in. This allows for easy updates of the node in the future.
If you'd prefer, you can also just download the `.py` file from the linked repository and add it to the `nodes` folder.
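For example, a sketch of the `git clone` approach described above; the repository URL and the `~/invokeai` root are placeholders:

```bash
# Clone a community node repository into the `nodes` folder of the InvokeAI root.
cd ~/invokeai/nodes
git clone https://github.com/some-author/some-invokeai-node.git
# Updating the node later is then just a pull:
cd some-invokeai-node && git pull
```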
To use a community workflow, download the the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
- Community Nodes
+ [Average Images](#average-images)
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
+ [Film Grain](#film-grain)
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
@ -32,20 +27,12 @@ To use a community workflow, download the the `.json` node graph file and load i
+ [Size Stepper Nodes](#size-stepper-nodes)
+ [Text font to Image](#text-font-to-image)
+ [Thresholding](#thresholding)
+ [Unsharp Mask](#unsharp-mask)
+ [XY Image to Grid and Images to Grids nodes](#xy-image-to-grid-and-images-to-grids-nodes)
- [Example Node Template](#example-node-template)
- [Disclaimer](#disclaimer)
- [Help](#help)
--------------------------------
### Average Images
**Description:** This node takes in a collection of images of the same size and averages them as output. It converts everything to RGB mode first.
**Node Link:** https://github.com/JPPhoto/average-images-node
--------------------------------
### Depth Map from Wavefront OBJ
@ -190,8 +177,12 @@ This includes 15 Nodes:
**Node Link:** https://github.com/helix4u/load_video_frame
**Example Node Graph:** https://github.com/helix4u/load_video_frame/blob/main/Example_Workflow.json
**Output Example:**
<img src="https://raw.githubusercontent.com/helix4u/load_video_frame/main/_git_assets/testmp4_embed_converted.gif" width="500" />
<img src="https://raw.githubusercontent.com/helix4u/load_video_frame/main/testmp4_embed_converted.gif" width="500" />
[Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4)
--------------------------------
### Make 3D
@ -317,13 +308,6 @@ Highlights/Midtones/Shadows (with LUT blur enabled):
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0a440e43-697f-4d17-82ee-f287467df0a5" width="300" /> <img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0a440e43-697f-4d17-82ee-f287467df0a5" width="300" />
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0701fd0f-2ca7-4fe2-8613-2b52547bafce" width="300" /> <img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0701fd0f-2ca7-4fe2-8613-2b52547bafce" width="300" />
--------------------------------
### Unsharp Mask
**Description:** Applies an unsharp mask filter to an image, preserving its alpha channel in the process.
**Node Link:** https://github.com/JPPhoto/unsharp-mask-node
--------------------------------
### XY Image to Grid and Images to Grids nodes
@ -341,9 +325,9 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ
**Description:** This node allows you to do super cool things with InvokeAI.
-**Node Link:** https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/app/invocations/prompt.py
+**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py
-**Example Workflow:** https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json
+**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json
**Output Examples**


@ -4,7 +4,7 @@ To learn about the specifics of creating a new node, please visit our [Node crea
Once you've created a node and confirmed that it behaves as expected locally, follow these steps:
-- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the tag "invokeai-node" in your repository's README can also help other users find it more easily.
+- Make sure the node is contained in a new Python (.py) file. Preferrably, the node is in a repo with a README detaling the nodes usage & examples to help others more easily use your node.
- Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list
- Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node.
- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project.


@ -2,17 +2,13 @@
We've curated some example workflows for you to get started with Workflows in InvokeAI
-To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
+To use them, right click on your desired workflow, press "Download Linked File". You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
If you're interested in finding more workflows, checkout the [#share-your-workflows](https://discord.com/channels/1020123559063990373/1130291608097661000) channel in the InvokeAI Discord.
* [SD1.5 / SD2 Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Text_to_Image.json)
-* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_Text_to_Image.json)
+* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
-* [SDXL Text to Image with Refiner](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_w_Refiner_Text_to_Image.json)
+* [SDXL (with Refiner) Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
-* [Multi ControlNet (Canny & Depth)](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Multi_ControlNet_Canny_and_Depth.json)
+* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json)
* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json)
* [Prompt From File](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json)
* [Face Detailer with IP-Adapter & ControlNet](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json.json)
* [FaceMask](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceMask.json)
* [FaceOff with 2x Face Scaling](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceOff_FaceScale2x.json)
* [QR Code Monster](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/QR_Code_Monster.json)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,985 +0,0 @@
{
"name": "Multi ControlNet (Canny & Depth)",
"author": "Millu",
"description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
"version": "0.1.0",
"contact": "millun@invoke.ai",
"tags": "ControlNet, canny, depth",
"notes": "",
"exposedFields": [
{
"nodeId": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"fieldName": "model"
},
{
"nodeId": "7ce68934-3419-42d4-ac70-82cfc9397306",
"fieldName": "prompt"
},
{
"nodeId": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
"fieldName": "prompt"
},
{
"nodeId": "c4b23e64-7986-40c4-9cad-46327b12e204",
"fieldName": "image"
},
{
"nodeId": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
"fieldName": "image"
}
],
"meta": {
"version": "1.0.0"
},
"nodes": [
{
"id": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
"type": "invocation",
"data": {
"id": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
"type": "image",
"inputs": {
"image": {
"id": "189c8adf-68cc-4774-a729-49da89f6fdf1",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": "Depth Input Image"
}
},
"outputs": {
"image": {
"id": "1a31cacd-9d19-4f32-b558-c5e4aa39ce73",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "12f298fd-1d11-4cca-9426-01240f7ec7cf",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "c47dabcb-44e8-40c9-992d-81dca59f598e",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 225,
"position": {
"x": 3617.163483500202,
"y": 40.5529847930888
}
},
{
"id": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
"type": "invocation",
"data": {
"id": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
"type": "controlnet",
"inputs": {
"image": {
"id": "4e0a3172-d3c2-4005-a84c-fa12a404f8a0",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": ""
},
"control_model": {
"id": "8cb2d998-4086-430a-8b13-94cbc81e3ca3",
"name": "control_model",
"type": "ControlNetModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "sd-controlnet-depth",
"base_model": "sd-1"
}
},
"control_weight": {
"id": "5e32bd8a-9dc8-42d8-9bcc-c2b0460c0b0f",
"name": "control_weight",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 1
},
"begin_step_percent": {
"id": "c258a276-352a-416c-8358-152f11005c0c",
"name": "begin_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"end_step_percent": {
"id": "43001125-0d70-4f87-8e79-da6603ad6c33",
"name": "end_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"control_mode": {
"id": "d2f14561-9443-4374-9270-e2f05007944e",
"name": "control_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "balanced"
},
"resize_mode": {
"id": "727ee7d3-8bf6-4c7d-8b8a-43546b3b59cd",
"name": "resize_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "just_resize"
}
},
"outputs": {
"control": {
"id": "b034aa0f-4d0d-46e4-b5e3-e25a9588d087",
"name": "control",
"type": "ControlField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 508,
"position": {
"x": 4477.604342844504,
"y": -49.39005411272677
}
},
{
"id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
"type": "invocation",
"data": {
"id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
"type": "compel",
"inputs": {
"prompt": {
"id": "7c2c4771-2161-4d77-aced-ff8c4b3f1c15",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Negative Prompt",
"value": ""
},
"clip": {
"id": "06d59e91-9cca-411d-bf05-86b099b3e8f7",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "858bc33c-134c-4bf6-8855-f943e1d26f14",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 4444.706437017514,
"y": -924.0715320874991
}
},
{
"id": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"type": "invocation",
"data": {
"id": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "f4a915a5-593e-4b6d-9198-c78eb5cefaed",
"name": "model",
"type": "MainModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
"model_type": "main"
}
}
},
"outputs": {
"unet": {
"id": "ee24fb16-da38-4c66-9fbc-e8f296ed40d2",
"name": "unet",
"type": "UNetField",
"fieldKind": "output"
},
"clip": {
"id": "f3fb0524-8803-41c1-86db-a61a13ee6a33",
"name": "clip",
"type": "ClipField",
"fieldKind": "output"
},
"vae": {
"id": "5c4878a8-b40f-44ab-b146-1c1f42c860b3",
"name": "vae",
"type": "VaeField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 226,
"position": {
"x": 3837.096149678291,
"y": -1050.015351148365
}
},
{
"id": "7ce68934-3419-42d4-ac70-82cfc9397306",
"type": "invocation",
"data": {
"id": "7ce68934-3419-42d4-ac70-82cfc9397306",
"type": "compel",
"inputs": {
"prompt": {
"id": "7c2c4771-2161-4d77-aced-ff8c4b3f1c15",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Positive Prompt",
"value": ""
},
"clip": {
"id": "06d59e91-9cca-411d-bf05-86b099b3e8f7",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "858bc33c-134c-4bf6-8855-f943e1d26f14",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 4449.356038911986,
"y": -1201.659695420063
}
},
{
"id": "d204d184-f209-4fae-a0a1-d152800844e1",
"type": "invocation",
"data": {
"id": "d204d184-f209-4fae-a0a1-d152800844e1",
"type": "controlnet",
"inputs": {
"image": {
"id": "4e0a3172-d3c2-4005-a84c-fa12a404f8a0",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": ""
},
"control_model": {
"id": "8cb2d998-4086-430a-8b13-94cbc81e3ca3",
"name": "control_model",
"type": "ControlNetModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "sd-controlnet-canny",
"base_model": "sd-1"
}
},
"control_weight": {
"id": "5e32bd8a-9dc8-42d8-9bcc-c2b0460c0b0f",
"name": "control_weight",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 1
},
"begin_step_percent": {
"id": "c258a276-352a-416c-8358-152f11005c0c",
"name": "begin_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"end_step_percent": {
"id": "43001125-0d70-4f87-8e79-da6603ad6c33",
"name": "end_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"control_mode": {
"id": "d2f14561-9443-4374-9270-e2f05007944e",
"name": "control_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "balanced"
},
"resize_mode": {
"id": "727ee7d3-8bf6-4c7d-8b8a-43546b3b59cd",
"name": "resize_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "just_resize"
}
},
"outputs": {
"control": {
"id": "b034aa0f-4d0d-46e4-b5e3-e25a9588d087",
"name": "control",
"type": "ControlField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 508,
"position": {
"x": 4479.68542130465,
"y": -618.4221638099414
}
},
{
"id": "c4b23e64-7986-40c4-9cad-46327b12e204",
"type": "invocation",
"data": {
"id": "c4b23e64-7986-40c4-9cad-46327b12e204",
"type": "image",
"inputs": {
"image": {
"id": "189c8adf-68cc-4774-a729-49da89f6fdf1",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": "Canny Input Image"
}
},
"outputs": {
"image": {
"id": "1a31cacd-9d19-4f32-b558-c5e4aa39ce73",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "12f298fd-1d11-4cca-9426-01240f7ec7cf",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "c47dabcb-44e8-40c9-992d-81dca59f598e",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 225,
"position": {
"x": 3593.7474460420153,
"y": -538.1200472386865
}
},
{
"id": "ca4d5059-8bfb-447f-b415-da0faba5a143",
"type": "invocation",
"data": {
"id": "ca4d5059-8bfb-447f-b415-da0faba5a143",
"type": "collect",
"inputs": {
"item": {
"id": "b16ae602-8708-4b1b-8d4f-9e0808d429ab",
"name": "item",
"type": "CollectionItem",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"collection": {
"id": "d8987dd8-dec8-4d94-816a-3e356af29884",
"name": "collection",
"type": "Collection",
"fieldKind": "output"
}
},
"label": "ControlNet Collection",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 104,
"position": {
"x": 4866.191497139488,
"y": -299.0538619537037
}
},
{
"id": "018b1214-c2af-43a7-9910-fb687c6726d7",
"type": "invocation",
"data": {
"id": "018b1214-c2af-43a7-9910-fb687c6726d7",
"type": "midas_depth_image_processor",
"inputs": {
"metadata": {
"id": "77f91980-c696-4a18-a9ea-6e2fc329a747",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"image": {
"id": "50710a20-2af5-424d-9d17-aa08167829c6",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": ""
},
"a_mult": {
"id": "f3b26f9d-2498-415e-9c01-197a8d06c0a5",
"name": "a_mult",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 2
},
"bg_th": {
"id": "4b1eb3ae-9d4a-47d6-b0ed-da62501e007f",
"name": "bg_th",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0.1
}
},
"outputs": {
"image": {
"id": "b4ed637c-c4a0-4fdd-a24e-36d6412e4ccf",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "6bf9b609-d72c-4239-99bd-390a73cc3a9c",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "3e8aef09-cf44-4e3e-a490-d3c9e7b23119",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 339,
"position": {
"x": 4054.229311491893,
"y": -31.611411056365725
}
},
{
"id": "c826ba5e-9676-4475-b260-07b85e88753c",
"type": "invocation",
"data": {
"id": "c826ba5e-9676-4475-b260-07b85e88753c",
"type": "canny_image_processor",
"inputs": {
"metadata": {
"id": "08331ea6-99df-4e61-a919-204d9bfa8fb2",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"image": {
"id": "33a37284-06ac-459c-ba93-1655e4f69b2d",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": ""
},
"low_threshold": {
"id": "21ec18a3-50c5-4ba1-9642-f921744d594f",
"name": "low_threshold",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 100
},
"high_threshold": {
"id": "ebeab271-a5ff-4c88-acfd-1d0271ab6ed4",
"name": "high_threshold",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 200
}
},
"outputs": {
"image": {
"id": "c0caadbf-883f-4cb4-a62d-626b9c81fc4e",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "df225843-8098-49c0-99d1-3b0b6600559f",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "e4abe0de-aa16-41f3-9cd7-968b49db5da3",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 339,
"position": {
"x": 4095.757337055795,
"y": -455.63440891935863
}
},
{
"id": "9db25398-c869-4a63-8815-c6559341ef12",
"type": "invocation",
"data": {
"id": "9db25398-c869-4a63-8815-c6559341ef12",
"type": "l2i",
"inputs": {
"metadata": {
"id": "2f269793-72e5-4ff3-b76c-fab4f93e983f",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "4aaedd3b-cc77-420c-806e-c7fa74ec4cdf",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "432b066a-2462-4d18-83d9-64620b72df45",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
},
"tiled": {
"id": "61f86e0f-7c46-40f8-b3f5-fe2f693595ca",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "39b6c89a-37ef-4a7e-9509-daeca49d5092",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
}
},
"outputs": {
"image": {
"id": "6204e9b0-61dd-4250-b685-2092ba0e28e6",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "b4140649-8d5d-4d2d-bfa6-09e389ede5f9",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "f3a0c0c8-fc24-4646-8be1-ed8cdd140828",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": false,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 267,
"position": {
"x": 5678.726701377887,
"y": -351.6792416734579
}
},
{
"id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"type": "invocation",
"data": {
"id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "869cd309-c238-444b-a1a0-5021f99785ba",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "343447b4-1e37-4e9e-8ac7-4d04864066af",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"noise": {
"id": "b556571e-0cf9-4e03-8cfc-5caad937d957",
"name": "noise",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"steps": {
"id": "a3b3d2de-9308-423e-b00d-c209c3e6e808",
"name": "steps",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 10
},
"cfg_scale": {
"id": "b13c50a4-ec7e-4579-b0ef-2fe5df2605ea",
"name": "cfg_scale",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 7.5
},
"denoising_start": {
"id": "57d5d755-f58f-4347-b991-f0bca4a0ab29",
"name": "denoising_start",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"denoising_end": {
"id": "323e78a6-880a-4d73-a62c-70faff965aa6",
"name": "denoising_end",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"scheduler": {
"id": "c25fdc17-a089-43ac-953e-067c45d5c76b",
"name": "scheduler",
"type": "Scheduler",
"fieldKind": "input",
"label": "",
"value": "euler"
},
"unet": {
"id": "6cde662b-e633-4569-b6b4-ec87c52c9c11",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
},
"control": {
"id": "276a4df9-bb26-4505-a4d3-a94e18c7b541",
"name": "control",
"type": "ControlPolymorphic",
"fieldKind": "input",
"label": ""
},
"ip_adapter": {
"id": "48d40c51-b5e2-4457-a428-eef0696695e8",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"t2i_adapter": {
"id": "75dd8af2-e7d7-48b4-a574-edd9f6e686ad",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "9223d67b-1dd7-4b34-a45f-ed0a725d9702",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"denoise_mask": {
"id": "4ee99177-6923-4b7f-8fe0-d721dd7cb05b",
"name": "denoise_mask",
"type": "DenoiseMaskField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"latents": {
"id": "7fb4e326-a974-43e8-9ee7-2e3ab235819d",
"name": "latents",
"type": "LatentsField",
"fieldKind": "output"
},
"width": {
"id": "6bb8acd0-8973-4195-a095-e376385dc705",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "795dea52-1c7d-4e64-99f7-2f60ec6e3ab9",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.4.0"
},
"width": 320,
"height": 646,
"position": {
"x": 5274.672987098195,
"y": -823.0752416664332
}
}
],
"edges": [
{
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"sourceHandle": "clip",
"target": "7ce68934-3419-42d4-ac70-82cfc9397306",
"targetHandle": "clip",
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-7ce68934-3419-42d4-ac70-82cfc9397306clip",
"type": "default"
},
{
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"sourceHandle": "clip",
"target": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
"targetHandle": "clip",
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-273e3f96-49ea-4dc5-9d5b-9660390f14e1clip",
"type": "default"
},
{
"source": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
"sourceHandle": "control",
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
"targetHandle": "item",
"id": "reactflow__edge-a33199c2-8340-401e-b8a2-42ffa875fc1ccontrol-ca4d5059-8bfb-447f-b415-da0faba5a143item",
"type": "default"
},
{
"source": "d204d184-f209-4fae-a0a1-d152800844e1",
"sourceHandle": "control",
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
"targetHandle": "item",
"id": "reactflow__edge-d204d184-f209-4fae-a0a1-d152800844e1control-ca4d5059-8bfb-447f-b415-da0faba5a143item",
"type": "default"
},
{
"source": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
"sourceHandle": "image",
"target": "018b1214-c2af-43a7-9910-fb687c6726d7",
"targetHandle": "image",
"id": "reactflow__edge-8e860e51-5045-456e-bf04-9a62a2a5c49eimage-018b1214-c2af-43a7-9910-fb687c6726d7image",
"type": "default"
},
{
"source": "018b1214-c2af-43a7-9910-fb687c6726d7",
"sourceHandle": "image",
"target": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
"targetHandle": "image",
"id": "reactflow__edge-018b1214-c2af-43a7-9910-fb687c6726d7image-a33199c2-8340-401e-b8a2-42ffa875fc1cimage",
"type": "default"
},
{
"source": "c4b23e64-7986-40c4-9cad-46327b12e204",
"sourceHandle": "image",
"target": "c826ba5e-9676-4475-b260-07b85e88753c",
"targetHandle": "image",
"id": "reactflow__edge-c4b23e64-7986-40c4-9cad-46327b12e204image-c826ba5e-9676-4475-b260-07b85e88753cimage",
"type": "default"
},
{
"source": "c826ba5e-9676-4475-b260-07b85e88753c",
"sourceHandle": "image",
"target": "d204d184-f209-4fae-a0a1-d152800844e1",
"targetHandle": "image",
"id": "reactflow__edge-c826ba5e-9676-4475-b260-07b85e88753cimage-d204d184-f209-4fae-a0a1-d152800844e1image",
"type": "default"
},
{
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"sourceHandle": "vae",
"target": "9db25398-c869-4a63-8815-c6559341ef12",
"targetHandle": "vae",
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9vae-9db25398-c869-4a63-8815-c6559341ef12vae",
"type": "default"
},
{
"source": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"sourceHandle": "latents",
"target": "9db25398-c869-4a63-8815-c6559341ef12",
"targetHandle": "latents",
"id": "reactflow__edge-ac481b7f-08bf-4a9d-9e0c-3a82ea5243celatents-9db25398-c869-4a63-8815-c6559341ef12latents",
"type": "default"
},
{
"source": "ca4d5059-8bfb-447f-b415-da0faba5a143",
"sourceHandle": "collection",
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"targetHandle": "control",
"id": "reactflow__edge-ca4d5059-8bfb-447f-b415-da0faba5a143collection-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cecontrol",
"type": "default"
},
{
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
"sourceHandle": "unet",
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"targetHandle": "unet",
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9unet-ac481b7f-08bf-4a9d-9e0c-3a82ea5243ceunet",
"type": "default"
},
{
"source": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
"sourceHandle": "conditioning",
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"targetHandle": "negative_conditioning",
"id": "reactflow__edge-273e3f96-49ea-4dc5-9d5b-9660390f14e1conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenegative_conditioning",
"type": "default"
},
{
"source": "7ce68934-3419-42d4-ac70-82cfc9397306",
"sourceHandle": "conditioning",
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
"targetHandle": "positive_conditioning",
"id": "reactflow__edge-7ce68934-3419-42d4-ac70-82cfc9397306conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cepositive_conditioning",
"type": "default"
}
]
}

View File

@ -1,719 +0,0 @@
{
"name": "Prompt from File",
"author": "InvokeAI",
"description": "Sample workflow using prompt from file capabilities of InvokeAI ",
"version": "0.1.0",
"contact": "millun@invoke.ai",
"tags": "text2image, prompt from file, default",
"notes": "",
"exposedFields": [
{
"nodeId": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"fieldName": "model"
},
{
"nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"fieldName": "file_path"
}
],
"meta": {
"version": "1.0.0"
},
"nodes": [
{
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
"type": "invocation",
"data": {
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
"type": "compel",
"inputs": {
"prompt": {
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "",
"value": ""
},
"clip": {
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 1177.3417789657444,
"y": -102.0924766641035
}
},
{
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"type": "invocation",
"data": {
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"type": "prompt_from_file",
"inputs": {
"file_path": {
"id": "37e37684-4f30-4ec8-beae-b333e550f904",
"name": "file_path",
"type": "string",
"fieldKind": "input",
"label": "Prompts File Path",
"value": ""
},
"pre_prompt": {
"id": "7de02feb-819a-4992-bad3-72a30920ddea",
"name": "pre_prompt",
"type": "string",
"fieldKind": "input",
"label": "",
"value": ""
},
"post_prompt": {
"id": "95f191d8-a282-428e-bd65-de8cb9b7513a",
"name": "post_prompt",
"type": "string",
"fieldKind": "input",
"label": "",
"value": ""
},
"start_line": {
"id": "efee9a48-05ab-4829-8429-becfa64a0782",
"name": "start_line",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 1
},
"max_prompts": {
"id": "abebb428-3d3d-49fd-a482-4e96a16fff08",
"name": "max_prompts",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 1
}
},
"outputs": {
"collection": {
"id": "77d5d7f1-9877-4ab1-9a8c-33e9ffa9abf3",
"name": "collection",
"type": "StringCollection",
"fieldKind": "output"
}
},
"label": "Prompts from File",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 589,
"position": {
"x": 394.181884547075,
"y": -423.5345157864633
}
},
{
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
"type": "invocation",
"data": {
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
"type": "iterate",
"inputs": {
"collection": {
"id": "4c564bf8-5ed6-441e-ad2c-dda265d5785f",
"name": "collection",
"type": "Collection",
"fieldKind": "input",
"label": "",
"value": []
}
},
"outputs": {
"item": {
"id": "36340f9a-e7a5-4afa-b4b5-313f4e292380",
"name": "item",
"type": "CollectionItem",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 104,
"position": {
"x": 792.8735298060233,
"y": -432.6964953027252
}
},
{
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"type": "invocation",
"data": {
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "3f264259-3418-47d5-b90d-b6600e36ae46",
"name": "model",
"type": "MainModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
"model_type": "main"
}
}
},
"outputs": {
"unet": {
"id": "8e182ea2-9d0a-4c02-9407-27819288d4b5",
"name": "unet",
"type": "UNetField",
"fieldKind": "output"
},
"clip": {
"id": "d67d9d30-058c-46d5-bded-3d09d6d1aa39",
"name": "clip",
"type": "ClipField",
"fieldKind": "output"
},
"vae": {
"id": "89641601-0429-4448-98d5-190822d920d8",
"name": "vae",
"type": "VaeField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 226,
"position": {
"x": -47.66201354137797,
"y": -299.218193067033
}
},
{
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"type": "invocation",
"data": {
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"type": "compel",
"inputs": {
"prompt": {
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "",
"value": ""
},
"clip": {
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 1175.0187896425462,
"y": -420.64289413577114
}
},
{
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"type": "invocation",
"data": {
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"type": "noise",
"inputs": {
"seed": {
"id": "b722d84a-eeee-484f-bef2-0250c027cb67",
"name": "seed",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 0
},
"width": {
"id": "d5f8ce11-0502-4bfc-9a30-5757dddf1f94",
"name": "width",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 512
},
"height": {
"id": "f187d5ff-38a5-4c3f-b780-fc5801ef34af",
"name": "height",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 512
},
"use_cpu": {
"id": "12f112b8-8b76-4816-b79e-662edc9f9aa5",
"name": "use_cpu",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": true
}
},
"outputs": {
"noise": {
"id": "08576ad1-96d9-42d2-96ef-6f5c1961933f",
"name": "noise",
"type": "LatentsField",
"fieldKind": "output"
},
"width": {
"id": "f3e1f94a-258d-41ff-9789-bd999bd9f40d",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "6cefc357-4339-415e-a951-49b9c2be32f4",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 389,
"position": {
"x": 809.1964864135837,
"y": 183.2735123359796
}
},
{
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"type": "invocation",
"data": {
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"type": "rand_int",
"inputs": {
"low": {
"id": "b9fc6cf1-469c-4037-9bf0-04836965826f",
"name": "low",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 0
},
"high": {
"id": "06eac725-0f60-4ba2-b8cd-7ad9f757488c",
"name": "high",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 2147483647
}
},
"outputs": {
"value": {
"id": "df08c84e-7346-4e92-9042-9e5cb773aaff",
"name": "value",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": false,
"version": "1.0.0"
},
"width": 320,
"height": 218,
"position": {
"x": 354.19913145404166,
"y": 301.86324846905165
}
},
{
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"type": "invocation",
"data": {
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"type": "l2i",
"inputs": {
"metadata": {
"id": "022e4b33-562b-438d-b7df-41c3fd931f40",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "67cb6c77-a394-4a66-a6a9-a0a7dcca69ec",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "7b3fd9ad-a4ef-4e04-89fa-3832a9902dbd",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
},
"tiled": {
"id": "5ac5680d-3add-4115-8ec0-9ef5bb87493b",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "db8297f5-55f8-452f-98cf-6572c2582152",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
}
},
"outputs": {
"image": {
"id": "d8778d0c-592a-4960-9280-4e77e00a7f33",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "c8b0a75a-f5de-4ff2-9227-f25bb2b97bec",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "83c05fbf-76b9-49ab-93c4-fa4b10e793e4",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 267,
"position": {
"x": 2037.861329274915,
"y": -329.8393457509562
}
},
{
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"type": "invocation",
"data": {
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "751fb35b-3f23-45ce-af1c-053e74251337",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "b9dc06b6-7481-4db1-a8c2-39d22a5eacff",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"noise": {
"id": "6e15e439-3390-48a4-8031-01e0e19f0e1d",
"name": "noise",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"steps": {
"id": "bfdfb3df-760b-4d51-b17b-0abb38b976c2",
"name": "steps",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 10
},
"cfg_scale": {
"id": "47770858-322e-41af-8494-d8b63ed735f3",
"name": "cfg_scale",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 7.5
},
"denoising_start": {
"id": "2ba78720-ee02-4130-a348-7bc3531f790b",
"name": "denoising_start",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"denoising_end": {
"id": "a874dffb-d433-4d1a-9f59-af4367bb05e4",
"name": "denoising_end",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"scheduler": {
"id": "36e021ad-b762-4fe4-ad4d-17f0291c40b2",
"name": "scheduler",
"type": "Scheduler",
"fieldKind": "input",
"label": "",
"value": "euler"
},
"unet": {
"id": "98d3282d-f9f6-4b5e-b9e8-58658f1cac78",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
},
"control": {
"id": "f2ea3216-43d5-42b4-887f-36e8f7166d53",
"name": "control",
"type": "ControlPolymorphic",
"fieldKind": "input",
"label": ""
},
"ip_adapter": {
"id": "d0780610-a298-47c8-a54e-70e769e0dfe2",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"t2i_adapter": {
"id": "fdb40970-185e-4ea8-8bb5-88f06f91f46a",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "e05b538a-1b5a-4aa5-84b1-fd2361289a81",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"denoise_mask": {
"id": "463a419e-df30-4382-8ffb-b25b25abe425",
"name": "denoise_mask",
"type": "DenoiseMaskField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"latents": {
"id": "559ee688-66cf-4139-8b82-3d3aa69995ce",
"name": "latents",
"type": "LatentsField",
"fieldKind": "output"
},
"width": {
"id": "0b4285c2-e8b9-48e5-98f6-0a49d3f98fd2",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "8b0881b9-45e5-47d5-b526-24b6661de0ee",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.4.0"
},
"width": 320,
"height": 646,
"position": {
"x": 1570.9941088179146,
"y": -407.6505491604564
}
}
],
"edges": [
{
"source": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"sourceHandle": "collection",
"target": "1b89067c-3f6b-42c8-991f-e3055789b251",
"targetHandle": "collection",
"id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection",
"type": "default"
},
{
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"sourceHandle": "clip",
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"targetHandle": "clip",
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip",
"type": "default"
},
{
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
"sourceHandle": "item",
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"targetHandle": "prompt",
"id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt",
"type": "default"
},
{
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"sourceHandle": "clip",
"target": "c2eaf1ba-5708-4679-9e15-945b8b432692",
"targetHandle": "clip",
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip",
"type": "default"
},
{
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"sourceHandle": "value",
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"targetHandle": "seed",
"id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed",
"type": "default"
},
{
"source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"sourceHandle": "conditioning",
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"targetHandle": "positive_conditioning",
"id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning",
"type": "default"
},
{
"source": "c2eaf1ba-5708-4679-9e15-945b8b432692",
"sourceHandle": "conditioning",
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"targetHandle": "negative_conditioning",
"id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning",
"type": "default"
},
{
"source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"sourceHandle": "noise",
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"targetHandle": "noise",
"id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise",
"type": "default"
},
{
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"sourceHandle": "unet",
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"targetHandle": "unet",
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet",
"type": "default"
},
{
"source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"sourceHandle": "latents",
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"targetHandle": "latents",
"id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents",
"type": "default"
},
{
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"sourceHandle": "vae",
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"targetHandle": "vae",
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae",
"type": "default"
}
]
}

View File

@ -1,758 +0,0 @@
{
"name": "QR Code Monster",
"author": "InvokeAI",
"description": "Sample workflow for create images with QR code Monster ControlNet",
"version": "1.0.1",
"contact": "invoke@invoke.ai",
"tags": "qrcode, controlnet, default",
"notes": "",
"exposedFields": [
{
"nodeId": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
"fieldName": "image"
},
{
"nodeId": "aca3b054-bfba-4392-bd20-6476f59504df",
"fieldName": "prompt"
},
{
"nodeId": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
"fieldName": "prompt"
}
],
"meta": {
"version": "1.0.0"
},
"nodes": [
{
"id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
"type": "invocation",
"data": {
"id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
"type": "compel",
"inputs": {
"prompt": {
"id": "6a1fe244-5656-4f8c-91d1-1fb474e28807",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Negative Prompt",
"value": ""
},
"clip": {
"id": "f24688f3-29b8-4a2d-8603-046e5a5c7250",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "700528eb-3f8b-4745-b540-34f919b5b228",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "Prompt",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 773.0502679628016,
"y": 1622.4836086770556
}
},
{
"id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"type": "invocation",
"data": {
"id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "cb36b6d3-6c1f-4911-a200-646745b0ff74",
"name": "model",
"type": "MainModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
"model_type": "main"
}
}
},
"outputs": {
"unet": {
"id": "7246895b-b252-49bc-b952-8d801b4672f7",
"name": "unet",
"type": "UNetField",
"fieldKind": "output"
},
"clip": {
"id": "3c2aedb8-30d5-4d4b-99df-d06a0d7bedc6",
"name": "clip",
"type": "ClipField",
"fieldKind": "output"
},
"vae": {
"id": "b9743815-5501-4bbb-8bde-8bd6ba298a4e",
"name": "vae",
"type": "VaeField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 226,
"position": {
"x": 211.58866462619744,
"y": 1376.0542388105248
}
},
{
"id": "aca3b054-bfba-4392-bd20-6476f59504df",
"type": "invocation",
"data": {
"id": "aca3b054-bfba-4392-bd20-6476f59504df",
"type": "compel",
"inputs": {
"prompt": {
"id": "6a1fe244-5656-4f8c-91d1-1fb474e28807",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Positive Prompt",
"value": ""
},
"clip": {
"id": "f24688f3-29b8-4a2d-8603-046e5a5c7250",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "700528eb-3f8b-4745-b540-34f919b5b228",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 261,
"position": {
"x": 770.6491131680111,
"y": 1316.379247112241
}
},
{
"id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
"type": "invocation",
"data": {
"id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
"type": "image",
"inputs": {
"image": {
"id": "89ba5d58-28c9-4e04-a5df-79fb7a6f3531",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": "QR Code / Hidden Image"
}
},
"outputs": {
"image": {
"id": "54335653-0e17-42da-b9e8-83c5fb5af670",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "a3c65953-39ea-4d97-8858-d65154ff9d11",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "2c7db511-ebc9-4286-a46b-bc11e0fd779f",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 225,
"position": {
"x": 700.5034176864369,
"y": 1981.749600549388
}
},
{
"id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
"type": "invocation",
"data": {
"id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
"type": "noise",
"inputs": {
"seed": {
"id": "7c6c76dd-127b-4829-b1ec-430790cb7ed7",
"name": "seed",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 0
},
"width": {
"id": "8ec6a525-a421-40d8-a17e-39e7b6836438",
"name": "width",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 512
},
"height": {
"id": "6af1e58a-e2ee-4ec4-9f06-d8d0412922ca",
"name": "height",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 512
},
"use_cpu": {
"id": "26662e99-5720-43a6-a5d8-06c9dab0e261",
"name": "use_cpu",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": true
}
},
"outputs": {
"noise": {
"id": "cb4c4dfc-a744-49eb-af4f-677448e28407",
"name": "noise",
"type": "LatentsField",
"fieldKind": "output"
},
"width": {
"id": "97e87be6-e81f-40a3-a522-28ebe4aad0ac",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "80784420-f1e1-47b0-bd1d-1d381a15e22d",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": false,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 32,
"position": {
"x": 1182.460291960481,
"y": 1759.592972960265
}
},
{
"id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
"type": "invocation",
"data": {
"id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
"type": "controlnet",
"inputs": {
"image": {
"id": "1f683889-9f14-40c8-af29-4b991b211a3a",
"name": "image",
"type": "ImageField",
"fieldKind": "input",
"label": ""
},
"control_model": {
"id": "a933b21d-22c1-4e06-818f-15416b971282",
"name": "control_model",
"type": "ControlNetModelField",
"fieldKind": "input",
"label": "",
"value": {
"model_name": "qrcode_monster",
"base_model": "sd-1"
}
},
"control_weight": {
"id": "198a0825-e55e-4496-bc54-c3d7b02f3d75",
"name": "control_weight",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 1.4
},
"begin_step_percent": {
"id": "c85ce42f-22af-42a0-8993-676002fb275e",
"name": "begin_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"end_step_percent": {
"id": "a61a65c4-9e6f-4fe2-96a5-1294d17ec6e4",
"name": "end_step_percent",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"control_mode": {
"id": "1aa45cfa-0249-46b7-bf24-3e38e92f5fa0",
"name": "control_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "balanced"
},
"resize_mode": {
"id": "a89d3cb9-a141-4cea-bb49-977bf267377b",
"name": "resize_mode",
"type": "enum",
"fieldKind": "input",
"label": "",
"value": "just_resize"
}
},
"outputs": {
"control": {
"id": "c9a1fc7e-cb25-45a9-adff-1a97c9ff04d6",
"name": "control",
"type": "ControlField",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 508,
"position": {
"x": 1165.434407461108,
"y": 1862.916856351665
}
},
{
"id": "28542b66-5a00-4780-a318-0a036d2df914",
"type": "invocation",
"data": {
"id": "28542b66-5a00-4780-a318-0a036d2df914",
"type": "l2i",
"inputs": {
"metadata": {
"id": "a38e8f55-7f2c-4fcc-a71f-d51e2eb0374a",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "80e97bc8-e716-4175-9115-5b58495aa30c",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "5641bce6-ac2b-47eb-bb32-2f290026b7e1",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
},
"tiled": {
"id": "9e75eb16-ae48-47ed-b180-e0409d377436",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "0518b0ce-ee37-437b-8437-cc2976a3279f",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
}
},
"outputs": {
"image": {
"id": "ec2ff985-a7eb-401f-92c4-1217cddad6a2",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "ba1d1720-6d67-4eca-9e9d-b97d08636774",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "10bcf8f4-6394-422f-b0c0-51680f3bfb25",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 267,
"position": {
"x": 2110.8415693683014,
"y": 1487.253341116115
}
},
{
"id": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"type": "invocation",
"data": {
"id": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "8e6aceaa-a986-4ab2-9c04-5b1027b3daf6",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "fbbaa712-ca1a-420b-9016-763f2a29d68c",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"noise": {
"id": "a3b3d5d2-c0f9-4b89-a9b3-8de9418f7bb5",
"name": "noise",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"steps": {
"id": "e491e664-2f8c-4f49-b3e4-57b051fbb9c5",
"name": "steps",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 10
},
"cfg_scale": {
"id": "f0318abd-ed65-4cad-86a7-48d1c19a6d14",
"name": "cfg_scale",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"value": 7.5
},
"denoising_start": {
"id": "f7c24c51-496f-44c4-836a-c734e529fec0",
"name": "denoising_start",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 0
},
"denoising_end": {
"id": "54f7656a-fb0d-4d9e-a459-f700f7dccd2e",
"name": "denoising_end",
"type": "float",
"fieldKind": "input",
"label": "",
"value": 1
},
"scheduler": {
"id": "363ee440-040d-499b-bf84-bf5391b08681",
"name": "scheduler",
"type": "Scheduler",
"fieldKind": "input",
"label": "",
"value": "euler"
},
"unet": {
"id": "5c93d4e5-1064-4700-ab1d-d12e1e9b5ba7",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
},
"control": {
"id": "e1948eb3-7407-43b0-93e3-139470f186b7",
"name": "control",
"type": "ControlPolymorphic",
"fieldKind": "input",
"label": ""
},
"ip_adapter": {
"id": "5675b2c3-adfb-49ee-b33c-26bdbfab1fed",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"t2i_adapter": {
"id": "89cd4ab3-3bfc-4063-9de5-91d42305c651",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "ec01df90-5042-418d-b6d6-86b251c13770",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"denoise_mask": {
"id": "561cde00-cb20-42ae-9bd3-4f477f73fbe1",
"name": "denoise_mask",
"type": "DenoiseMaskField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"latents": {
"id": "f9addefe-efcc-4e01-8945-6ebbc934b002",
"name": "latents",
"type": "LatentsField",
"fieldKind": "output"
},
"width": {
"id": "6d48f78b-d681-422a-8677-0111bd0625f1",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "f25997b8-6316-44ce-b696-b82e4ed51ae5",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.4.0"
},
"width": 320,
"height": 646,
"position": {
"x": 1597.9598293300219,
"y": 1420.4637727891632
}
},
{
"id": "59349822-af20-4e0e-a53f-3ba135d00c3f",
"type": "invocation",
"data": {
"id": "59349822-af20-4e0e-a53f-3ba135d00c3f",
"type": "rand_int",
"inputs": {
"low": {
"id": "051f22f9-2d4f-414f-bc51-84af2d626efa",
"name": "low",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 0
},
"high": {
"id": "77206186-f264-4224-9589-f925cf903dc9",
"name": "high",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 2147483647
}
},
"outputs": {
"value": {
"id": "a7ed9387-3a24-4d34-b7c5-f713bd544ab1",
"name": "value",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": false,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": false,
"version": "1.0.0"
},
"width": 320,
"height": 32,
"position": {
"x": 1178.16746986153,
"y": 1663.9433412808876
}
}
],
"edges": [
{
"source": "59349822-af20-4e0e-a53f-3ba135d00c3f",
"target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
"id": "59349822-af20-4e0e-a53f-3ba135d00c3f-280fd8a7-3b0c-49fe-8be4-6246e08b6c9a-collapsed",
"type": "collapsed"
},
{
"source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"sourceHandle": "clip",
"target": "aca3b054-bfba-4392-bd20-6476f59504df",
"targetHandle": "clip",
"id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-aca3b054-bfba-4392-bd20-6476f59504dfclip",
"type": "default"
},
{
"source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"sourceHandle": "clip",
"target": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
"targetHandle": "clip",
"id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-3db7cee0-31e2-4a3d-94a1-268cb16177ddclip",
"type": "default"
},
{
"source": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
"sourceHandle": "image",
"target": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
"targetHandle": "image",
"id": "reactflow__edge-a6cc0986-f928-4a7e-8d44-ba2d4b36f54aimage-2ac03cf6-0326-454a-bed0-d8baef2bf30dimage",
"type": "default"
},
{
"source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"sourceHandle": "vae",
"target": "28542b66-5a00-4780-a318-0a036d2df914",
"targetHandle": "vae",
"id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1vae-28542b66-5a00-4780-a318-0a036d2df914vae",
"type": "default"
},
{
"source": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
"sourceHandle": "noise",
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"targetHandle": "noise",
"id": "reactflow__edge-280fd8a7-3b0c-49fe-8be4-6246e08b6c9anoise-9755ae4c-ef30-4db3-80f6-a31f98979a11noise",
"type": "default"
},
{
"source": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
"sourceHandle": "conditioning",
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"targetHandle": "negative_conditioning",
"id": "reactflow__edge-3db7cee0-31e2-4a3d-94a1-268cb16177ddconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11negative_conditioning",
"type": "default"
},
{
"source": "aca3b054-bfba-4392-bd20-6476f59504df",
"sourceHandle": "conditioning",
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"targetHandle": "positive_conditioning",
"id": "reactflow__edge-aca3b054-bfba-4392-bd20-6476f59504dfconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11positive_conditioning",
"type": "default"
},
{
"source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
"sourceHandle": "unet",
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"targetHandle": "unet",
"id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1unet-9755ae4c-ef30-4db3-80f6-a31f98979a11unet",
"type": "default"
},
{
"source": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
"sourceHandle": "control",
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"targetHandle": "control",
"id": "reactflow__edge-2ac03cf6-0326-454a-bed0-d8baef2bf30dcontrol-9755ae4c-ef30-4db3-80f6-a31f98979a11control",
"type": "default"
},
{
"source": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
"sourceHandle": "latents",
"target": "28542b66-5a00-4780-a318-0a036d2df914",
"targetHandle": "latents",
"id": "reactflow__edge-9755ae4c-ef30-4db3-80f6-a31f98979a11latents-28542b66-5a00-4780-a318-0a036d2df914latents",
"type": "default"
},
{
"source": "59349822-af20-4e0e-a53f-3ba135d00c3f",
"sourceHandle": "value",
"target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
"targetHandle": "seed",
"id": "reactflow__edge-59349822-af20-4e0e-a53f-3ba135d00c3fvalue-280fd8a7-3b0c-49fe-8be4-6246e08b6c9aseed",
"type": "default"
}
]
}
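
Each of these workflow files follows the same layout: a `nodes` array of invocation definitions (each with typed `inputs` and `outputs`) and an `edges` array wiring a source node's output handle to a target node's input handle. As a rough illustration only — not part of this changeset — a minimal Python sketch that summarizes such a graph; the filename is an assumption:

```python
import json

# Load an exported InvokeAI workflow JSON (the filename here is only an example).
with open("QR_Code_Monster.json") as f:
    workflow = json.load(f)

# Every node records its invocation type plus typed input/output fields.
for node in workflow["nodes"]:
    print(f'node {node["id"]}: {node["data"]["type"]}')

# Edges connect a source node's output handle to a target node's input handle;
# "collapsed" edges carry no handles, so fall back to an empty string.
for edge in workflow["edges"]:
    print(
        f'{edge["source"]}.{edge.get("sourceHandle", "")} -> '
        f'{edge["target"]}.{edge.get("targetHandle", "")}'
    )
```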

View File

@ -26,6 +26,10 @@
{ {
"nodeId": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "nodeId": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"fieldName": "style" "fieldName": "style"
},
{
"nodeId": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"fieldName": "steps"
} }
], ],
"meta": { "meta": {
@ -36,6 +40,7 @@
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"type": "sdxl_compel_prompt", "type": "sdxl_compel_prompt",
"inputs": { "inputs": {
@ -130,12 +135,10 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 793, "height": 764,
"position": { "position": {
"x": 1275, "x": 1275,
"y": -350 "y": -350
@ -145,6 +148,7 @@
"id": "55705012-79b9-4aac-9f26-c0b10309785b", "id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "55705012-79b9-4aac-9f26-c0b10309785b", "id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "noise", "type": "noise",
"inputs": { "inputs": {
@ -205,9 +209,7 @@
"isOpen": false, "isOpen": false,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 32, "height": 32,
@ -216,10 +218,83 @@
"y": -300 "y": -300
} }
}, },
{
"id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"type": "invocation",
"data": {
"version": "1.0.0",
"id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"type": "l2i",
"inputs": {
"tiled": {
"id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "b146d873-ffb9-4767-986a-5360504841a2",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": true
},
"latents": {
"id": "65441abd-7713-4b00-9d8d-3771404002e8",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "a478b833-6e13-4611-9a10-842c89603c74",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"image": {
"id": "c87ae925-f858-417a-8940-8708ba9b4b53",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "23e41c00-a354-48e8-8f59-5875679c27ab",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": true,
"isIntermediate": false
},
"width": 320,
"height": 224,
"position": {
"x": 2025,
"y": -250
}
},
{ {
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "rand_int", "type": "rand_int",
"inputs": { "inputs": {
@ -252,9 +327,7 @@
"isOpen": false, "isOpen": false,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": false,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 32, "height": 32,
@ -267,6 +340,7 @@
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"type": "sdxl_model_loader", "type": "sdxl_model_loader",
"inputs": { "inputs": {
@ -277,7 +351,7 @@
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": { "value": {
"model_name": "stable-diffusion-xl-base-1-0", "model_name": "stable-diffusion-xl-base-1.0",
"base_model": "sdxl", "base_model": "sdxl",
"model_type": "main" "model_type": "main"
} }
@ -313,12 +387,10 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 258, "height": 234,
"position": { "position": {
"x": 475, "x": 475,
"y": 25 "y": 25
@ -328,6 +400,7 @@
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"type": "sdxl_compel_prompt", "type": "sdxl_compel_prompt",
"inputs": { "inputs": {
@ -422,143 +495,48 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 793, "height": 764,
"position": { "position": {
"x": 900, "x": 900,
"y": -350 "y": -350
} }
}, },
{ {
"id": "63e91020-83b2-4f35-b174-ad9692aabb48", "id": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"type": "invocation", "type": "invocation",
"data": { "data": {
"id": "63e91020-83b2-4f35-b174-ad9692aabb48", "version": "1.0.0",
"type": "l2i", "id": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"inputs": {
"metadata": {
"id": "88971324-3fdb-442d-b8b7-7612478a8622",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "da0e40cb-c49f-4fa5-9856-338b91a65f6b",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "ae5164ce-1710-4ec5-a83a-6113a0d1b5c0",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
},
"tiled": {
"id": "2ccfd535-1a7b-4ecf-84db-9430a64fb3d7",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "64f07d5a-54a2-429c-8c5b-0c2a3a8e5cd5",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
}
},
"outputs": {
"image": {
"id": "9b281eaa-6504-407d-a5ca-1e5e8020a4bf",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "98e545f3-b53b-490d-b94d-bed9418ccc75",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "4a74bd43-d7f7-4c7f-bb3b-d09bb2992c46",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": false,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 267,
"position": {
"x": 2112.5626808057173,
"y": -174.24042139280238
}
},
{
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"type": "invocation",
"data": {
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"type": "denoise_latents", "type": "denoise_latents",
"inputs": { "inputs": {
"positive_conditioning": {
"id": "29b73dfa-a06e-4b4a-a844-515b9eb93a81",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "a81e6f5b-f4de-4919-b483-b6e2f067465a",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"noise": { "noise": {
"id": "4ba06bb7-eb45-4fb9-9984-31001b545587", "id": "4884a4b7-cc19-4fea-83c7-1f940e6edd24",
"name": "noise", "name": "noise",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"steps": { "steps": {
"id": "36ee8a45-ca69-44bc-9bc3-aa881e6045c0", "id": "4c61675c-b6b9-41ac-b187-b5c13b587039",
"name": "steps", "name": "steps",
"type": "integer", "type": "integer",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": 10 "value": 36
}, },
"cfg_scale": { "cfg_scale": {
"id": "2a2024e0-a736-46ec-933c-c1c1ebe96943", "id": "f8213f35-4637-4a1a-83f4-1f8cfb9ccd2c",
"name": "cfg_scale", "name": "cfg_scale",
"type": "FloatPolymorphic", "type": "float",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": 7.5 "value": 7.5
}, },
"denoising_start": { "denoising_start": {
"id": "be219d5e-41b7-430a-8fb5-bc21a31ad219", "id": "01e2f30d-0acd-4e21-98b9-a9b8e24c6db2",
"name": "denoising_start", "name": "denoising_start",
"type": "float", "type": "float",
"fieldKind": "input", "fieldKind": "input",
@ -566,7 +544,7 @@
"value": 0 "value": 0
}, },
"denoising_end": { "denoising_end": {
"id": "3adfb7ae-c9f7-4a40-b6e0-4c2050bd1a99", "id": "3db95479-a73b-4c75-9b44-08daec16b224",
"name": "denoising_end", "name": "denoising_end",
"type": "float", "type": "float",
"fieldKind": "input", "fieldKind": "input",
@ -574,71 +552,71 @@
"value": 1 "value": 1
}, },
"scheduler": { "scheduler": {
"id": "14423e0d-7215-4ee0-b065-f9e95eaa8d7d", "id": "db8430a9-64c3-4c54-ae38-9f597cf7b6d5",
"name": "scheduler", "name": "scheduler",
"type": "Scheduler", "type": "Scheduler",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": "euler" "value": "euler"
}, },
"unet": {
"id": "e73bbf98-6489-492b-b83c-faed215febac",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
},
"control": { "control": {
"id": "dab351b3-0c86-4ea5-9782-4e8edbfb0607", "id": "599b49e8-6435-4576-be41-a5155f3a17e3",
"name": "control", "name": "control",
"type": "ControlPolymorphic", "type": "ControlField",
"fieldKind": "input",
"label": ""
},
"ip_adapter": {
"id": "192daea0-a90a-43cc-a2ee-0114a8e90318",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"t2i_adapter": {
"id": "ee386a55-d4c7-48c1-ac57-7bc4e3aada7a",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"latents": { "latents": {
"id": "3a922c6a-3d8c-4c9e-b3ec-2f4d81cda077", "id": "226f9e91-454e-4159-9fa6-019c0cf29277",
"name": "latents", "name": "latents",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"denoise_mask": { "denoise_mask": {
"id": "cd7ce032-835f-495f-8b45-d57272f33132", "id": "de019cb6-7fb5-45bf-a266-22e20889893f",
"name": "denoise_mask", "name": "denoise_mask",
"type": "DenoiseMaskField", "type": "DenoiseMaskField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
},
"positive_conditioning": {
"id": "02fc400a-110d-470e-8411-f404f966a949",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "4bd3bdfa-fcf4-42be-8e47-1e314255798f",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"unet": {
"id": "7c2d58a8-b5f1-4e63-8ffd-8ada52c35832",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
} }
}, },
"outputs": { "outputs": {
"latents": { "latents": {
"id": "6260b84f-8361-470a-98d8-5b22a45c2d8c", "id": "6a6fa492-de26-4e95-b1d9-a322fe37eb13",
"name": "latents", "name": "latents",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "output" "fieldKind": "output"
}, },
"width": { "width": {
"id": "aede0ecf-25b6-46be-aa30-b77f79715deb", "id": "a9790729-7d6c-4418-903d-4da961fccf56",
"name": "width", "name": "width",
"type": "integer", "type": "integer",
"fieldKind": "output" "fieldKind": "output"
}, },
"height": { "height": {
"id": "519abf62-d475-48ef-ab8f-66136bc0e499", "id": "fa74efe5-7330-4a3c-b256-c82a544585b4",
"name": "height", "name": "height",
"type": "integer", "type": "integer",
"fieldKind": "output" "fieldKind": "output"
@ -648,15 +626,13 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.4.0"
}, },
"width": 320, "width": 320,
"height": 646, "height": 558,
"position": { "position": {
"x": 1642.955772577545, "x": 1650,
"y": -230.2485847594651 "y": -250
} }
} }
], ],
@ -710,42 +686,50 @@
{ {
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"sourceHandle": "vae", "sourceHandle": "vae",
"target": "63e91020-83b2-4f35-b174-ad9692aabb48", "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"targetHandle": "vae", "targetHandle": "vae",
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22vae-63e91020-83b2-4f35-b174-ad9692aabb48vae", "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae",
"type": "default" "type": "default"
}, },
{ {
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22", "source": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"sourceHandle": "unet", "sourceHandle": "latents",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"targetHandle": "unet", "targetHandle": "latents",
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet", "id": "reactflow__edge-87ee6243-fb0d-4f77-ad5f-56591659339elatents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents",
"type": "default" "type": "default"
}, },
{ {
"source": "faf965a4-7530-427b-b1f3-4ba6505c2a08", "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"sourceHandle": "conditioning", "sourceHandle": "conditioning",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"targetHandle": "positive_conditioning", "targetHandle": "positive_conditioning",
"id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning", "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-87ee6243-fb0d-4f77-ad5f-56591659339epositive_conditioning",
"type": "default" "type": "default"
}, },
{ {
"source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204", "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"sourceHandle": "conditioning", "sourceHandle": "conditioning",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"targetHandle": "negative_conditioning", "targetHandle": "negative_conditioning",
"id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning", "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-87ee6243-fb0d-4f77-ad5f-56591659339enegative_conditioning",
"type": "default"
},
{
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"sourceHandle": "unet",
"target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"targetHandle": "unet",
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-87ee6243-fb0d-4f77-ad5f-56591659339eunet",
"type": "default" "type": "default"
}, },
{ {
"source": "55705012-79b9-4aac-9f26-c0b10309785b", "source": "55705012-79b9-4aac-9f26-c0b10309785b",
"sourceHandle": "noise", "sourceHandle": "noise",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb", "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
"targetHandle": "noise", "targetHandle": "noise",
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise", "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-87ee6243-fb0d-4f77-ad5f-56591659339enoise",
"type": "default" "type": "default"
} }
] ]
} }

File diff suppressed because it is too large

View File

@ -18,6 +18,10 @@
{ {
"nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"fieldName": "prompt" "fieldName": "prompt"
},
{
"nodeId": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"fieldName": "steps"
} }
], ],
"meta": { "meta": {
@ -28,6 +32,7 @@
"id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"type": "compel", "type": "compel",
"inputs": { "inputs": {
@ -59,21 +64,20 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 261, "height": 235,
"position": { "position": {
"x": 995.7263915923627, "x": 1400,
"y": 239.67783573351227 "y": -75
} }
}, },
{ {
"id": "55705012-79b9-4aac-9f26-c0b10309785b", "id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "55705012-79b9-4aac-9f26-c0b10309785b", "id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "noise", "type": "noise",
"inputs": { "inputs": {
@ -134,21 +138,92 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 389, "height": 364,
"position": { "position": {
"x": 993.4442117555518, "x": 1000,
"y": 605.6757415334787 "y": 350
}
},
{
"id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"type": "invocation",
"data": {
"version": "1.0.0",
"id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"type": "l2i",
"inputs": {
"tiled": {
"id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "b146d873-ffb9-4767-986a-5360504841a2",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"latents": {
"id": "65441abd-7713-4b00-9d8d-3771404002e8",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "a478b833-6e13-4611-9a10-842c89603c74",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
}
},
"outputs": {
"image": {
"id": "c87ae925-f858-417a-8940-8708ba9b4b53",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "23e41c00-a354-48e8-8f59-5875679c27ab",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": true,
"isIntermediate": false
},
"width": 320,
"height": 266,
"position": {
"x": 1800,
"y": 200
} }
}, },
{ {
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"type": "main_model_loader", "type": "main_model_loader",
"inputs": { "inputs": {
@ -186,24 +261,23 @@
} }
}, },
"label": "", "label": "",
"isOpen": true, "isOpen": false,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 226, "height": 32,
"position": { "position": {
"x": 163.04436745878343, "x": 1000,
"y": 254.63156870373479 "y": 200
} }
}, },
{ {
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "compel", "type": "compel",
"inputs": { "inputs": {
@ -235,21 +309,20 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 261, "height": 235,
"position": { "position": {
"x": 595.7263915923627, "x": 1000,
"y": 239.67783573351227 "y": -75
} }
}, },
{ {
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "invocation", "type": "invocation",
"data": { "data": {
"version": "1.0.0",
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2", "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "rand_int", "type": "rand_int",
"inputs": { "inputs": {
@ -279,66 +352,51 @@
} }
}, },
"label": "Random Seed", "label": "Random Seed",
"isOpen": true, "isOpen": false,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": false,
"version": "1.0.0"
}, },
"width": 320, "width": 320,
"height": 218, "height": 32,
"position": { "position": {
"x": 541.094822888628, "x": 1000,
"y": 694.5704476446829 "y": 275
} }
}, },
{ {
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "id": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"type": "invocation", "type": "invocation",
"data": { "data": {
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "version": "1.0.0",
"id": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"type": "denoise_latents", "type": "denoise_latents",
"inputs": { "inputs": {
"positive_conditioning": {
"id": "90b7f4f8-ada7-4028-8100-d2e54f192052",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "9393779e-796c-4f64-b740-902a1177bf53",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"noise": { "noise": {
"id": "8e17f1e5-4f98-40b1-b7f4-86aeeb4554c1", "id": "8b18f3eb-40d2-45c1-9a9d-28d6af0dce2b",
"name": "noise", "name": "noise",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"steps": { "steps": {
"id": "9b63302d-6bd2-42c9-ac13-9b1afb51af88", "id": "0be4373c-46f3-441c-80a7-a4bb6ceb498c",
"name": "steps", "name": "steps",
"type": "integer", "type": "integer",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": 10 "value": 36
}, },
"cfg_scale": { "cfg_scale": {
"id": "87dd04d3-870e-49e1-98bf-af003a810109", "id": "107267ce-4666-4cd7-94b3-7476b7973ae9",
"name": "cfg_scale", "name": "cfg_scale",
"type": "FloatPolymorphic", "type": "float",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": 7.5 "value": 7.5
}, },
"denoising_start": { "denoising_start": {
"id": "f369d80f-4931-4740-9bcd-9f0620719fab", "id": "d2ce9f0f-5fc2-48b2-b917-53442941e9a1",
"name": "denoising_start", "name": "denoising_start",
"type": "float", "type": "float",
"fieldKind": "input", "fieldKind": "input",
@ -346,7 +404,7 @@
"value": 0 "value": 0
}, },
"denoising_end": { "denoising_end": {
"id": "747d10e5-6f02-445c-994c-0604d814de8c", "id": "8ad51505-b8d0-422a-beb8-96fc6fc6b65f",
"name": "denoising_end", "name": "denoising_end",
"type": "float", "type": "float",
"fieldKind": "input", "fieldKind": "input",
@ -354,71 +412,71 @@
"value": 1 "value": 1
}, },
"scheduler": { "scheduler": {
"id": "1de84a4e-3a24-4ec8-862b-16ce49633b9b", "id": "53092874-a43b-4623-91a2-76e62fdb1f2e",
"name": "scheduler", "name": "scheduler",
"type": "Scheduler", "type": "Scheduler",
"fieldKind": "input", "fieldKind": "input",
"label": "", "label": "",
"value": "euler" "value": "euler"
}, },
"unet": {
"id": "ffa6fef4-3ce2-4bdb-9296-9a834849489b",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
},
"control": { "control": {
"id": "077b64cb-34be-4fcc-83f2-e399807a02bd", "id": "7abe57cc-469d-437e-ad72-a18efa28215f",
"name": "control", "name": "control",
"type": "ControlPolymorphic", "type": "ControlField",
"fieldKind": "input",
"label": ""
},
"ip_adapter": {
"id": "1d6948f7-3a65-4a65-a20c-768b287251aa",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
},
"t2i_adapter": {
"id": "75e67b09-952f-4083-aaf4-6b804d690412",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"latents": { "latents": {
"id": "334d4ba3-5a99-4195-82c5-86fb3f4f7d43", "id": "add8bbe5-14d0-42d4-a867-9c65ab8dd129",
"name": "latents", "name": "latents",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
}, },
"denoise_mask": { "denoise_mask": {
"id": "0d3dbdbf-b014-4e95-8b18-ff2ff9cb0bfa", "id": "f373a190-0fc8-45b7-ae62-c4aa8e9687e1",
"name": "denoise_mask", "name": "denoise_mask",
"type": "DenoiseMaskField", "type": "DenoiseMaskField",
"fieldKind": "input", "fieldKind": "input",
"label": "" "label": ""
},
"positive_conditioning": {
"id": "c7160303-8a23-4f15-9197-855d48802a7f",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"negative_conditioning": {
"id": "fd750efa-1dfc-4d0b-accb-828e905ba320",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
},
"unet": {
"id": "af1f41ba-ce2a-4314-8d7f-494bb5800381",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
} }
}, },
"outputs": { "outputs": {
"latents": { "latents": {
"id": "70fa5bbc-0c38-41bb-861a-74d6d78d2f38", "id": "8508d04d-f999-4a44-94d0-388ab1401d27",
"name": "latents", "name": "latents",
"type": "LatentsField", "type": "LatentsField",
"fieldKind": "output" "fieldKind": "output"
}, },
"width": { "width": {
"id": "98ee0e6c-82aa-4e8f-8be5-dc5f00ee47f0", "id": "93dc8287-0a2a-4320-83a4-5e994b7ba23e",
"name": "width", "name": "width",
"type": "integer", "type": "integer",
"fieldKind": "output" "fieldKind": "output"
}, },
"height": { "height": {
"id": "e8cb184a-5e1a-47c8-9695-4b8979564f5d", "id": "d9862f5c-0ab5-46fa-8c29-5059bb581d96",
"name": "height", "name": "height",
"type": "integer", "type": "integer",
"fieldKind": "output" "fieldKind": "output"
@ -428,95 +486,13 @@
"isOpen": true, "isOpen": true,
"notes": "", "notes": "",
"embedWorkflow": false, "embedWorkflow": false,
"isIntermediate": true, "isIntermediate": true
"useCache": true,
"version": "1.4.0"
}, },
"width": 320, "width": 320,
"height": 646, "height": 558,
"position": { "position": {
"x": 1476.5794704734735, "x": 1400,
"y": 256.80174342731783 "y": 200
}
},
{
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "invocation",
"data": {
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "l2i",
"inputs": {
"metadata": {
"id": "ab375f12-0042-4410-9182-29e30db82c85",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
},
"latents": {
"id": "3a7e7efd-bff5-47d7-9d48-615127afee78",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
},
"vae": {
"id": "a1f5f7a1-0795-4d58-b036-7820c0b0ef2b",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
},
"tiled": {
"id": "da52059a-0cee-4668-942f-519aa794d739",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
},
"fp32": {
"id": "c4841df3-b24e-4140-be3b-ccd454c2522c",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
}
},
"outputs": {
"image": {
"id": "72d667d0-cf85-459d-abf2-28bd8b823fe7",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
},
"width": {
"id": "c8c907d8-1066-49d1-b9a6-83bdcd53addc",
"name": "width",
"type": "integer",
"fieldKind": "output"
},
"height": {
"id": "230f359c-b4ea-436c-b372-332d7dcdca85",
"name": "height",
"type": "integer",
"fieldKind": "output"
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": false,
"useCache": true,
"version": "1.0.0"
},
"width": 320,
"height": 267,
"position": {
"x": 2037.9648469717395,
"y": 426.10844427600136
} }
} }
], ],
@ -546,52 +522,52 @@
"type": "default" "type": "default"
}, },
{ {
"source": "55705012-79b9-4aac-9f26-c0b10309785b", "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "noise", "sourceHandle": "vae",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"targetHandle": "noise", "targetHandle": "vae",
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise", "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae",
"type": "default"
},
{
"source": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"sourceHandle": "latents",
"target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
"targetHandle": "latents",
"id": "reactflow__edge-75899702-fa44-46d2-b2d5-3e17f234c3e7latents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents",
"type": "default" "type": "default"
}, },
{ {
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c", "source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"sourceHandle": "conditioning", "sourceHandle": "conditioning",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"targetHandle": "positive_conditioning", "targetHandle": "positive_conditioning",
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning", "id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7positive_conditioning",
"type": "default" "type": "default"
}, },
{ {
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402", "source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"sourceHandle": "conditioning", "sourceHandle": "conditioning",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"targetHandle": "negative_conditioning", "targetHandle": "negative_conditioning",
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning", "id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7negative_conditioning",
"type": "default" "type": "default"
}, },
{ {
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "unet", "sourceHandle": "unet",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"targetHandle": "unet", "targetHandle": "unet",
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet", "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-75899702-fa44-46d2-b2d5-3e17f234c3e7unet",
"type": "default" "type": "default"
}, },
{ {
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c", "source": "55705012-79b9-4aac-9f26-c0b10309785b",
"sourceHandle": "latents", "sourceHandle": "noise",
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f", "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
"targetHandle": "latents", "targetHandle": "noise",
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents", "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-75899702-fa44-46d2-b2d5-3e17f234c3e7noise",
"type": "default"
},
{
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "vae",
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"targetHandle": "vae",
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
"type": "default" "type": "default"
} }
] ]
} }
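For readers tracing the `edges` list above: the `reactflow__edge-…` ids are plain concatenations of the source node id, source handle, target node id, and target handle. A minimal sketch of that convention (hypothetical helper, not part of the workflow schema):

```python
def edge_id(source: str, source_handle: str, target: str, target_handle: str) -> str:
    # mirrors the id pattern visible in the edges above
    return f"reactflow__edge-{source}{source_handle}-{target}{target_handle}"

print(edge_id(
    "c8d55139-f380-4695-b7f2-8b3d1e1e3db8", "unet",
    "75899702-fa44-46d2-b2d5-3e17f234c3e7", "unet",
))
# -> reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-75899702-fa44-46d2-b2d5-3e17f234c3e7unet
```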

View File

@ -244,7 +244,7 @@ class InvokeAiInstance:
"numpy~=1.24.0", # choose versions that won't be uninstalled during phase 2 "numpy~=1.24.0", # choose versions that won't be uninstalled during phase 2
"urllib3~=1.26.0", "urllib3~=1.26.0",
"requests~=2.28.0", "requests~=2.28.0",
"torch==2.1.0", "torch~=2.0.0",
"torchmetrics==0.11.4", "torchmetrics==0.11.4",
"torchvision>=0.14.1", "torchvision>=0.14.1",
"--force-reinstall", "--force-reinstall",
@ -460,10 +460,10 @@ def get_torch_source() -> (Union[str, None], str):
url = "https://download.pytorch.org/whl/cpu" url = "https://download.pytorch.org/whl/cpu"
if device == "cuda": if device == "cuda":
url = "https://download.pytorch.org/whl/cu121" url = "https://download.pytorch.org/whl/cu118"
optional_modules = "[xformers,onnx-cuda]" optional_modules = "[xformers,onnx-cuda]"
if device == "cuda_and_dml": if device == "cuda_and_dml":
url = "https://download.pytorch.org/whl/cu121" url = "https://download.pytorch.org/whl/cu118"
optional_modules = "[xformers,onnx-directml]" optional_modules = "[xformers,onnx-directml]"
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13 # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
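The hunk above only switches the CUDA wheel index; the surrounding selection logic is unchanged. A hedged sketch of that logic (the `cu121` URL is taken from one column of the hunk, the other column uses `cu118`; the default `optional_modules` value is an assumption, as it is not shown here):

```python
def pick_torch_source(device: str):
    # mirrors the branches shown in get_torch_source() above
    url = "https://download.pytorch.org/whl/cpu"
    optional_modules = None  # assumption: default not visible in this hunk
    if device == "cuda":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-cuda]"
    if device == "cuda_and_dml":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-directml]"
    return url, optional_modules

print(pick_torch_source("cuda"))
```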

View File

@ -137,7 +137,7 @@ def dest_path(dest=None) -> Path:
path_completer = PathCompleter( path_completer = PathCompleter(
only_directories=True, only_directories=True,
expanduser=True, expanduser=True,
get_paths=lambda: [browse_start], # noqa: B023 get_paths=lambda: [browse_start],
# get_paths=lambda: [".."].extend(list(browse_start.iterdir())) # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
) )
@ -149,7 +149,7 @@ def dest_path(dest=None) -> Path:
completer=path_completer, completer=path_completer,
default=str(browse_start) + os.sep, default=str(browse_start) + os.sep,
vi_mode=True, vi_mode=True,
complete_while_typing=True, complete_while_typing=True
# Test that this is not needed on Windows # Test that this is not needed on Windows
# complete_style=CompleteStyle.READLINE_LIKE, # complete_style=CompleteStyle.READLINE_LIKE,
) )

View File

@ -24,7 +24,6 @@ from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
from ..services.model_manager.model_manager_default import ModelManagerService from ..services.model_manager.model_manager_default import ModelManagerService
from ..services.model_records import ModelRecordServiceSQL
from ..services.names.names_default import SimpleNameService from ..services.names.names_default import SimpleNameService
from ..services.session_processor.session_processor_default import DefaultSessionProcessor from ..services.session_processor.session_processor_default import DefaultSessionProcessor
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
@ -86,7 +85,6 @@ class ApiDependencies:
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size) invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents")) latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents"))
model_manager = ModelManagerService(config, logger) model_manager = ModelManagerService(config, logger)
model_record_service = ModelRecordServiceSQL(db=db)
names = SimpleNameService() names = SimpleNameService()
performance_statistics = InvocationStatsService() performance_statistics = InvocationStatsService()
processor = DefaultInvocationProcessor() processor = DefaultInvocationProcessor()
@ -113,7 +111,6 @@ class ApiDependencies:
latents=latents, latents=latents,
logger=logger, logger=logger,
model_manager=model_manager, model_manager=model_manager,
model_records=model_record_service,
names=names, names=names,
performance_statistics=performance_statistics, performance_statistics=performance_statistics,
processor=processor, processor=processor,

View File

@ -28,7 +28,7 @@ class FastAPIEventService(EventServiceBase):
self.__queue.put(None) self.__queue.put(None)
def dispatch(self, event_name: str, payload: Any) -> None: def dispatch(self, event_name: str, payload: Any) -> None:
self.__queue.put({"event_name": event_name, "payload": payload}) self.__queue.put(dict(event_name=event_name, payload=payload))
async def __dispatch_from_queue(self, stop_event: threading.Event): async def __dispatch_from_queue(self, stop_event: threading.Event):
"""Get events on from the queue and dispatch them, from the correct thread""" """Get events on from the queue and dispatch them, from the correct thread"""

View File

@ -1,164 +0,0 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""
from hashlib import sha1
from random import randbytes
from typing import List, Optional
from fastapi import Body, Path, Query, Response
from fastapi.routing import APIRouter
from pydantic import BaseModel, ConfigDict
from starlette.exceptions import HTTPException
from typing_extensions import Annotated
from invokeai.app.services.model_records import (
DuplicateModelException,
InvalidModelException,
UnknownModelException,
)
from invokeai.backend.model_manager.config import (
AnyModelConfig,
BaseModelType,
ModelType,
)
from ..dependencies import ApiDependencies
model_records_router = APIRouter(prefix="/v1/model/record", tags=["models"])
class ModelsList(BaseModel):
"""Return list of configs."""
models: list[AnyModelConfig]
model_config = ConfigDict(use_enum_values=True)
@model_records_router.get(
"/",
operation_id="list_model_records",
)
async def list_model_records(
base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
) -> ModelsList:
"""Get a list of models."""
record_store = ApiDependencies.invoker.services.model_records
found_models: list[AnyModelConfig] = []
if base_models:
for base_model in base_models:
found_models.extend(record_store.search_by_attr(base_model=base_model, model_type=model_type))
else:
found_models.extend(record_store.search_by_attr(model_type=model_type))
return ModelsList(models=found_models)
@model_records_router.get(
"/i/{key}",
operation_id="get_model_record",
responses={
200: {"description": "Success"},
400: {"description": "Bad request"},
404: {"description": "The model could not be found"},
},
)
async def get_model_record(
key: str = Path(description="Key of the model record to fetch."),
) -> AnyModelConfig:
"""Get a model record"""
record_store = ApiDependencies.invoker.services.model_records
try:
return record_store.get_model(key)
except UnknownModelException as e:
raise HTTPException(status_code=404, detail=str(e))
@model_records_router.patch(
"/i/{key}",
operation_id="update_model_record",
responses={
200: {"description": "The model was updated successfully"},
400: {"description": "Bad request"},
404: {"description": "The model could not be found"},
409: {"description": "There is already a model corresponding to the new name"},
},
status_code=200,
response_model=AnyModelConfig,
)
async def update_model_record(
key: Annotated[str, Path(description="Unique key of model")],
info: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
) -> AnyModelConfig:
"""Update model contents with a new config. If the model name or base fields are changed, then the model is renamed."""
logger = ApiDependencies.invoker.services.logger
record_store = ApiDependencies.invoker.services.model_records
try:
model_response = record_store.update_model(key, config=info)
logger.info(f"Updated model: {key}")
except UnknownModelException as e:
raise HTTPException(status_code=404, detail=str(e))
except ValueError as e:
logger.error(str(e))
raise HTTPException(status_code=409, detail=str(e))
return model_response
@model_records_router.delete(
"/i/{key}",
operation_id="del_model_record",
responses={
204: {"description": "Model deleted successfully"},
404: {"description": "Model not found"},
},
status_code=204,
)
async def del_model_record(
key: str = Path(description="Unique key of model to remove from model registry."),
) -> Response:
"""Delete Model"""
logger = ApiDependencies.invoker.services.logger
try:
record_store = ApiDependencies.invoker.services.model_records
record_store.del_model(key)
logger.info(f"Deleted model: {key}")
return Response(status_code=204)
except UnknownModelException as e:
logger.error(str(e))
raise HTTPException(status_code=404, detail=str(e))
@model_records_router.post(
"/i/",
operation_id="add_model_record",
responses={
201: {"description": "The model added successfully"},
409: {"description": "There is already a model corresponding to this path or repo_id"},
415: {"description": "Unrecognized file/folder format"},
},
status_code=201,
)
async def add_model_record(
config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")]
) -> AnyModelConfig:
"""
Add a model using the configuration information appropriate for its type.
"""
logger = ApiDependencies.invoker.services.logger
record_store = ApiDependencies.invoker.services.model_records
if config.key == "<NOKEY>":
config.key = sha1(randbytes(100)).hexdigest()
logger.info(f"Created model {config.key} for {config.name}")
try:
record_store.add_model(config.key, config)
except DuplicateModelException as e:
logger.error(str(e))
raise HTTPException(status_code=409, detail=str(e))
except InvalidModelException as e:
logger.error(str(e))
raise HTTPException(status_code=415)
# now fetch it out
return record_store.get_model(config.key)
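A hypothetical client sketch for the `/v1/model/record` routes defined above (the base URL, port, and the `main` model_type filter are assumptions; the router is mounted under the `/api` prefix elsewhere in this diff):

```python
import requests

base = "http://127.0.0.1:9090/api/v1/model/record"
listing = requests.get(f"{base}/", params={"model_type": "main"}).json()
for model in listing["models"]:
    print(model["key"], model.get("name"))
```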

View File

@ -1,5 +1,6 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654), 2023 Kent Keirsey (https://github.com/hipsterusername), 2023 Lincoln D. Stein # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654), 2023 Kent Keirsey (https://github.com/hipsterusername), 2023 Lincoln D. Stein
import pathlib import pathlib
from typing import Annotated, List, Literal, Optional, Union from typing import Annotated, List, Literal, Optional, Union
@ -54,7 +55,7 @@ async def list_models(
) -> ModelsList: ) -> ModelsList:
"""Gets a list of models""" """Gets a list of models"""
if base_models and len(base_models) > 0: if base_models and len(base_models) > 0:
models_raw = [] models_raw = list()
for base_model in base_models: for base_model in base_models:
models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type))
else: else:

View File

@ -34,4 +34,4 @@ class SocketIO:
async def _handle_unsub_queue(self, sid, data, *args, **kwargs): async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
if "queue_id" in data: if "queue_id" in data:
await self.__sio.leave_room(sid, data["queue_id"]) await self.__sio.enter_room(sid, data["queue_id"])

View File

@ -43,7 +43,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
board_images, board_images,
boards, boards,
images, images,
model_records,
models, models,
session_queue, session_queue,
sessions, sessions,
@ -107,7 +106,6 @@ app.include_router(sessions.session_router, prefix="/api")
app.include_router(utilities.utilities_router, prefix="/api") app.include_router(utilities.utilities_router, prefix="/api")
app.include_router(models.models_router, prefix="/api") app.include_router(models.models_router, prefix="/api")
app.include_router(model_records.model_records_router, prefix="/api")
app.include_router(images.images_router, prefix="/api") app.include_router(images.images_router, prefix="/api")
app.include_router(boards.boards_router, prefix="/api") app.include_router(boards.boards_router, prefix="/api")
app.include_router(board_images.board_images_router, prefix="/api") app.include_router(board_images.board_images_router, prefix="/api")
@ -132,7 +130,7 @@ def custom_openapi() -> dict[str, Any]:
# Add all outputs # Add all outputs
all_invocations = BaseInvocation.get_invocations() all_invocations = BaseInvocation.get_invocations()
output_types = set() output_types = set()
output_type_titles = {} output_type_titles = dict()
for invoker in all_invocations: for invoker in all_invocations:
output_type = signature(invoker.invoke).return_annotation output_type = signature(invoker.invoke).return_annotation
output_types.add(output_type) output_types.add(output_type)
@ -173,12 +171,12 @@ def custom_openapi() -> dict[str, Any]:
# print(f"Config with name {name} already defined") # print(f"Config with name {name} already defined")
continue continue
openapi_schema["components"]["schemas"][name] = { openapi_schema["components"]["schemas"][name] = dict(
"title": name, title=name,
"description": "An enumeration.", description="An enumeration.",
"type": "string", type="string",
"enum": [v.value for v in model_config_format_enum], enum=list(v.value for v in model_config_format_enum),
} )
app.openapi_schema = openapi_schema app.openapi_schema = openapi_schema
return app.openapi_schema return app.openapi_schema

View File

@ -25,4 +25,4 @@ spec.loader.exec_module(module)
# add core nodes to __all__ # add core nodes to __all__
python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py")) python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
__all__ = [f.stem for f in python_files] # type: ignore __all__ = list(f.stem for f in python_files) # type: ignore
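The list comprehension and the `list(...)` generator call in the hunk above are equivalent; a self-contained check using a stand-in glob instead of the module's `filter(...)` iterator:

```python
from pathlib import Path

python_files = [p for p in Path(".").glob("*.py") if not p.name.startswith("_")]
assert [f.stem for f in python_files] == list(f.stem for f in python_files)
```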

View File

@ -16,7 +16,6 @@ from pydantic.fields import FieldInfo, _Unset
from pydantic_core import PydanticUndefined from pydantic_core import PydanticUndefined
from invokeai.app.services.config.config_default import InvokeAIAppConfig from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import uuid_string from invokeai.app.util.misc import uuid_string
if TYPE_CHECKING: if TYPE_CHECKING:
@ -31,6 +30,70 @@ class InvalidFieldError(TypeError):
pass pass
class FieldDescriptions:
denoising_start = "When to start denoising, expressed a percentage of total steps"
denoising_end = "When to stop denoising, expressed a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
class Input(str, Enum): class Input(str, Enum):
""" """
The type of input a field accepts. The type of input a field accepts.
@ -236,35 +299,35 @@ def InputField(
Ignored for non-collection fields. Ignored for non-collection fields.
""" """
json_schema_extra_: dict[str, Any] = { json_schema_extra_: dict[str, Any] = dict(
"input": input, input=input,
"ui_type": ui_type, ui_type=ui_type,
"ui_component": ui_component, ui_component=ui_component,
"ui_hidden": ui_hidden, ui_hidden=ui_hidden,
"ui_order": ui_order, ui_order=ui_order,
"item_default": item_default, item_default=item_default,
"ui_choice_labels": ui_choice_labels, ui_choice_labels=ui_choice_labels,
"_field_kind": "input", _field_kind="input",
} )
field_args = { field_args = dict(
"default": default, default=default,
"default_factory": default_factory, default_factory=default_factory,
"title": title, title=title,
"description": description, description=description,
"pattern": pattern, pattern=pattern,
"strict": strict, strict=strict,
"gt": gt, gt=gt,
"ge": ge, ge=ge,
"lt": lt, lt=lt,
"le": le, le=le,
"multiple_of": multiple_of, multiple_of=multiple_of,
"allow_inf_nan": allow_inf_nan, allow_inf_nan=allow_inf_nan,
"max_digits": max_digits, max_digits=max_digits,
"decimal_places": decimal_places, decimal_places=decimal_places,
"min_length": min_length, min_length=min_length,
"max_length": max_length, max_length=max_length,
} )
""" """
Invocation definitions have their fields typed correctly for their `invoke()` functions. Invocation definitions have their fields typed correctly for their `invoke()` functions.
@ -299,24 +362,24 @@ def InputField(
# because we are manually making fields optional, we need to store the original required bool for reference later # because we are manually making fields optional, we need to store the original required bool for reference later
if default is PydanticUndefined and default_factory is PydanticUndefined: if default is PydanticUndefined and default_factory is PydanticUndefined:
json_schema_extra_.update({"orig_required": True}) json_schema_extra_.update(dict(orig_required=True))
else: else:
json_schema_extra_.update({"orig_required": False}) json_schema_extra_.update(dict(orig_required=False))
# make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined: if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined:
default_ = None if default is PydanticUndefined else default default_ = None if default is PydanticUndefined else default
provided_args.update({"default": default_}) provided_args.update(dict(default=default_))
if default is not PydanticUndefined: if default is not PydanticUndefined:
# before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value
json_schema_extra_.update({"default": default}) json_schema_extra_.update(dict(default=default))
json_schema_extra_.update({"orig_default": default}) json_schema_extra_.update(dict(orig_default=default))
elif default is not PydanticUndefined and default_factory is PydanticUndefined: elif default is not PydanticUndefined and default_factory is PydanticUndefined:
default_ = default default_ = default
provided_args.update({"default": default_}) provided_args.update(dict(default=default_))
json_schema_extra_.update({"orig_default": default_}) json_schema_extra_.update(dict(orig_default=default_))
elif default_factory is not PydanticUndefined: elif default_factory is not PydanticUndefined:
provided_args.update({"default_factory": default_factory}) provided_args.update(dict(default_factory=default_factory))
# TODO: cannot serialize default_factory... # TODO: cannot serialize default_factory...
# json_schema_extra_.update(dict(orig_default_factory=default_factory)) # json_schema_extra_.update(dict(orig_default_factory=default_factory))
@ -383,12 +446,12 @@ def OutputField(
decimal_places=decimal_places, decimal_places=decimal_places,
min_length=min_length, min_length=min_length,
max_length=max_length, max_length=max_length,
json_schema_extra={ json_schema_extra=dict(
"ui_type": ui_type, ui_type=ui_type,
"ui_hidden": ui_hidden, ui_hidden=ui_hidden,
"ui_order": ui_order, ui_order=ui_order,
"_field_kind": "output", _field_kind="output",
}, ),
) )
@ -460,14 +523,14 @@ class BaseInvocationOutput(BaseModel):
@classmethod @classmethod
def get_output_types(cls) -> Iterable[str]: def get_output_types(cls) -> Iterable[str]:
return (get_type(i) for i in BaseInvocationOutput.get_outputs()) return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs())
@staticmethod @staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
# Because we use a pydantic Literal field with default value for the invocation type, # Because we use a pydantic Literal field with default value for the invocation type,
# it will be typed as optional in the OpenAPI schema. Make it required manually. # it will be typed as optional in the OpenAPI schema. Make it required manually.
if "required" not in schema or not isinstance(schema["required"], list): if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = [] schema["required"] = list()
schema["required"].extend(["type"]) schema["required"].extend(["type"])
model_config = ConfigDict( model_config = ConfigDict(
@ -527,11 +590,16 @@ class BaseInvocation(ABC, BaseModel):
@classmethod @classmethod
def get_invocations_map(cls) -> dict[str, BaseInvocation]: def get_invocations_map(cls) -> dict[str, BaseInvocation]:
# Get the type strings out of the literals and into a dictionary # Get the type strings out of the literals and into a dictionary
return {get_type(i): i for i in BaseInvocation.get_invocations()} return dict(
map(
lambda i: (get_type(i), i),
BaseInvocation.get_invocations(),
)
)
@classmethod @classmethod
def get_invocation_types(cls) -> Iterable[str]: def get_invocation_types(cls) -> Iterable[str]:
return (get_type(i) for i in BaseInvocation.get_invocations()) return map(lambda i: get_type(i), BaseInvocation.get_invocations())
@classmethod @classmethod
def get_output_type(cls) -> BaseInvocationOutput: def get_output_type(cls) -> BaseInvocationOutput:
@ -550,7 +618,7 @@ class BaseInvocation(ABC, BaseModel):
if uiconfig and hasattr(uiconfig, "version"): if uiconfig and hasattr(uiconfig, "version"):
schema["version"] = uiconfig.version schema["version"] = uiconfig.version
if "required" not in schema or not isinstance(schema["required"], list): if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = [] schema["required"] = list()
schema["required"].extend(["type", "id"]) schema["required"].extend(["type", "id"])
@abstractmethod @abstractmethod
@ -604,15 +672,15 @@ class BaseInvocation(ABC, BaseModel):
id: str = Field( id: str = Field(
default_factory=uuid_string, default_factory=uuid_string,
description="The id of this instance of an invocation. Must be unique among all instances of invocations.", description="The id of this instance of an invocation. Must be unique among all instances of invocations.",
json_schema_extra={"_field_kind": "internal"}, json_schema_extra=dict(_field_kind="internal"),
) )
is_intermediate: bool = Field( is_intermediate: bool = Field(
default=False, default=False,
description="Whether or not this is an intermediate invocation.", description="Whether or not this is an intermediate invocation.",
json_schema_extra={"ui_type": UIType.IsIntermediate, "_field_kind": "internal"}, json_schema_extra=dict(ui_type=UIType.IsIntermediate, _field_kind="internal"),
) )
use_cache: bool = Field( use_cache: bool = Field(
default=True, description="Whether or not to use the cache", json_schema_extra={"_field_kind": "internal"} default=True, description="Whether or not to use the cache", json_schema_extra=dict(_field_kind="internal")
) )
UIConfig: ClassVar[Type[UIConfigBase]] UIConfig: ClassVar[Type[UIConfigBase]]
@ -646,7 +714,7 @@ class _Model(BaseModel):
# Get all pydantic model attrs, methods, etc # Get all pydantic model attrs, methods, etc
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())} RESERVED_PYDANTIC_FIELD_NAMES = set(map(lambda m: m[0], inspect.getmembers(_Model())))
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None: def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
@ -661,7 +729,9 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
field_kind = ( field_kind = (
# _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None field.json_schema_extra.get("_field_kind", None)
if field.json_schema_extra
else None
) )
# must have a field_kind # must have a field_kind
@ -722,7 +792,7 @@ def invocation(
# Add OpenAPI schema extras # Add OpenAPI schema extras
uiconf_name = cls.__qualname__ + ".UIConfig" uiconf_name = cls.__qualname__ + ".UIConfig"
if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name: if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name:
cls.UIConfig = type(uiconf_name, (UIConfigBase,), {}) cls.UIConfig = type(uiconf_name, (UIConfigBase,), dict())
if title is not None: if title is not None:
cls.UIConfig.title = title cls.UIConfig.title = title
if tags is not None: if tags is not None:
@ -749,7 +819,7 @@ def invocation(
invocation_type_annotation = Literal[invocation_type] # type: ignore invocation_type_annotation = Literal[invocation_type] # type: ignore
invocation_type_field = Field( invocation_type_field = Field(
title="type", default=invocation_type, json_schema_extra={"_field_kind": "internal"} title="type", default=invocation_type, json_schema_extra=dict(_field_kind="internal")
) )
docstring = cls.__doc__ docstring = cls.__doc__
@ -795,7 +865,7 @@ def invocation_output(
# Add the output type to the model. # Add the output type to the model.
output_type_annotation = Literal[output_type] # type: ignore output_type_annotation = Literal[output_type] # type: ignore
output_type_field = Field(title="type", default=output_type, json_schema_extra={"_field_kind": "internal"}) output_type_field = Field(title="type", default=output_type, json_schema_extra=dict(_field_kind="internal"))
docstring = cls.__doc__ docstring = cls.__doc__
cls = create_model( cls = create_model(
@ -827,7 +897,7 @@ WorkflowFieldValidator = TypeAdapter(WorkflowField)
class WithWorkflow(BaseModel): class WithWorkflow(BaseModel):
workflow: Optional[WorkflowField] = Field( workflow: Optional[WorkflowField] = Field(
default=None, description=FieldDescriptions.workflow, json_schema_extra={"_field_kind": "internal"} default=None, description=FieldDescriptions.workflow, json_schema_extra=dict(_field_kind="internal")
) )
@ -845,5 +915,5 @@ MetadataFieldValidator = TypeAdapter(MetadataField)
class WithMetadata(BaseModel): class WithMetadata(BaseModel):
metadata: Optional[MetadataField] = Field( metadata: Optional[MetadataField] = Field(
default=None, description=FieldDescriptions.metadata, json_schema_extra={"_field_kind": "internal"} default=None, description=FieldDescriptions.metadata, json_schema_extra=dict(_field_kind="internal")
) )
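Most of the changes in this file swap `dict(...)` keyword calls for dict literals inside `json_schema_extra`. A hedged sketch of the same pattern with plain pydantic (v2 assumed), outside of InvokeAI's `InputField`/`OutputField` wrappers:

```python
from pydantic import BaseModel, Field

class Demo(BaseModel):
    steps: int = Field(default=10, json_schema_extra={"ui_hidden": False, "_field_kind": "input"})

# the extra keys are merged into the generated schema for the field
print(Demo.model_json_schema()["properties"]["steps"])
```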

View File

@ -7,7 +7,6 @@ from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo, BasicConditioningInfo,
ExtraConditioningInfo, ExtraConditioningInfo,
@ -20,6 +19,7 @@ from ...backend.util.devices import torch_dtype
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -108,15 +108,13 @@ class CompelInvocation(BaseInvocation):
print(f'Warn: trigger: "{trigger}" not found') print(f'Warn: trigger: "{trigger}" not found')
with ( with (
ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),
ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (
tokenizer, tokenizer,
ti_manager, ti_manager,
), ),
text_encoder_info as text_encoder,
# Apply the LoRA after text_encoder has been moved to its target device for faster patching.
ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers), ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers),
text_encoder_info as text_encoder,
): ):
compel = Compel( compel = Compel(
tokenizer=tokenizer, tokenizer=tokenizer,
@ -231,15 +229,13 @@ class SDXLPromptInvocationBase:
print(f'Warn: trigger: "{trigger}" not found') print(f'Warn: trigger: "{trigger}" not found')
with ( with (
ModelPatcher.apply_lora(text_encoder_info.context.model, _lora_loader(), lora_prefix),
ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (
tokenizer, tokenizer,
ti_manager, ti_manager,
), ),
text_encoder_info as text_encoder,
# Apply the LoRA after text_encoder has been moved to its target device for faster patching.
ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers), ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers),
text_encoder_info as text_encoder,
): ):
compel = Compel( compel = Compel(
tokenizer=tokenizer, tokenizer=tokenizer,
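The reordering in this `with` block matters because parenthesized context managers enter top to bottom; per the comment in the hunk, the LoRA patch is meant to run only after the text encoder has been moved to its target device. A stand-in sketch of that ordering (the `step` manager is hypothetical):

```python
from contextlib import contextmanager

@contextmanager
def step(name: str):
    print("enter", name)
    yield name
    print("exit", name)

with (
    step("apply_ti"),
    step("load text_encoder onto target device"),
    step("apply_lora_text_encoder"),
    step("apply_clip_skip"),
):
    pass  # managers are entered in the order listed, exited in reverse
```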

View File

@ -28,12 +28,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import BaseModelType from ...backend.model_management import BaseModelType
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -96,7 +96,7 @@ class ControlOutput(BaseInvocationOutput):
control: ControlField = OutputField(description=FieldDescriptions.control) control: ControlField = OutputField(description=FieldDescriptions.control)
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.0") @invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.0.0")
class ControlNetInvocation(BaseInvocation): class ControlNetInvocation(BaseInvocation):
"""Collects ControlNet info to pass to other nodes""" """Collects ControlNet info to pass to other nodes"""
@ -173,7 +173,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithWorkflow):
title="Canny Processor", title="Canny Processor",
tags=["controlnet", "canny"], tags=["controlnet", "canny"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class CannyImageProcessorInvocation(ImageProcessorInvocation): class CannyImageProcessorInvocation(ImageProcessorInvocation):
"""Canny edge detection for ControlNet""" """Canny edge detection for ControlNet"""
@ -196,7 +196,7 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
title="HED (softedge) Processor", title="HED (softedge) Processor",
tags=["controlnet", "hed", "softedge"], tags=["controlnet", "hed", "softedge"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class HedImageProcessorInvocation(ImageProcessorInvocation): class HedImageProcessorInvocation(ImageProcessorInvocation):
"""Applies HED edge detection to image""" """Applies HED edge detection to image"""
@ -225,7 +225,7 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
title="Lineart Processor", title="Lineart Processor",
tags=["controlnet", "lineart"], tags=["controlnet", "lineart"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class LineartImageProcessorInvocation(ImageProcessorInvocation): class LineartImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art processing to image""" """Applies line art processing to image"""
@ -247,7 +247,7 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
title="Lineart Anime Processor", title="Lineart Anime Processor",
tags=["controlnet", "lineart", "anime"], tags=["controlnet", "lineart", "anime"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation): class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art anime processing to image""" """Applies line art anime processing to image"""
@ -270,7 +270,7 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
title="Openpose Processor", title="Openpose Processor",
tags=["controlnet", "openpose", "pose"], tags=["controlnet", "openpose", "pose"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class OpenposeImageProcessorInvocation(ImageProcessorInvocation): class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Openpose processing to image""" """Applies Openpose processing to image"""
@ -295,7 +295,7 @@ class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
title="Midas Depth Processor", title="Midas Depth Processor",
tags=["controlnet", "midas"], tags=["controlnet", "midas"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation): class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Midas depth processing to image""" """Applies Midas depth processing to image"""
@ -322,7 +322,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
title="Normal BAE Processor", title="Normal BAE Processor",
tags=["controlnet"], tags=["controlnet"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation): class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies NormalBae processing to image""" """Applies NormalBae processing to image"""
@ -339,7 +339,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
@invocation( @invocation(
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.1.0" "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.0.0"
) )
class MlsdImageProcessorInvocation(ImageProcessorInvocation): class MlsdImageProcessorInvocation(ImageProcessorInvocation):
"""Applies MLSD processing to image""" """Applies MLSD processing to image"""
@ -362,7 +362,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
@invocation( @invocation(
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.1.0" "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.0.0"
) )
class PidiImageProcessorInvocation(ImageProcessorInvocation): class PidiImageProcessorInvocation(ImageProcessorInvocation):
"""Applies PIDI processing to image""" """Applies PIDI processing to image"""
@ -389,7 +389,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
title="Content Shuffle Processor", title="Content Shuffle Processor",
tags=["controlnet", "contentshuffle"], tags=["controlnet", "contentshuffle"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
"""Applies content shuffle processing to image""" """Applies content shuffle processing to image"""
@ -419,7 +419,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
title="Zoe (Depth) Processor", title="Zoe (Depth) Processor",
tags=["controlnet", "zoe", "depth"], tags=["controlnet", "zoe", "depth"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation): class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Zoe depth processing to image""" """Applies Zoe depth processing to image"""
@ -435,7 +435,7 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
title="Mediapipe Face Processor", title="Mediapipe Face Processor",
tags=["controlnet", "mediapipe", "face"], tags=["controlnet", "mediapipe", "face"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
"""Applies mediapipe face processing to image""" """Applies mediapipe face processing to image"""
@ -458,7 +458,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
title="Leres (Depth) Processor", title="Leres (Depth) Processor",
tags=["controlnet", "leres", "depth"], tags=["controlnet", "leres", "depth"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class LeresImageProcessorInvocation(ImageProcessorInvocation): class LeresImageProcessorInvocation(ImageProcessorInvocation):
"""Applies leres processing to image""" """Applies leres processing to image"""
@ -487,7 +487,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
title="Tile Resample Processor", title="Tile Resample Processor",
tags=["controlnet", "tile"], tags=["controlnet", "tile"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class TileResamplerProcessorInvocation(ImageProcessorInvocation): class TileResamplerProcessorInvocation(ImageProcessorInvocation):
"""Tile resampler processor""" """Tile resampler processor"""
@ -527,7 +527,7 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
title="Segment Anything Processor", title="Segment Anything Processor",
tags=["controlnet", "segmentanything"], tags=["controlnet", "segmentanything"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation): class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
"""Applies segment anything processing to image""" """Applies segment anything processing to image"""
@ -569,7 +569,7 @@ class SamDetectorReproducibleColors(SamDetector):
title="Color Map Processor", title="Color Map Processor",
tags=["controlnet"], tags=["controlnet"],
category="controlnet", category="controlnet",
version="1.1.0", version="1.0.0",
) )
class ColorMapImageProcessorInvocation(ImageProcessorInvocation): class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
"""Generates a color map from the provided image""" """Generates a color map from the provided image"""

View File

@ -11,7 +11,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.1.0") @invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.0.0")
class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow): class CvInpaintInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Simple inpaint using opencv.""" """Simple inpaint using opencv."""

View File

@ -131,7 +131,7 @@ def prepare_faces_list(
deduped_faces: list[FaceResultData] = [] deduped_faces: list[FaceResultData] = []
if len(face_result_list) == 0: if len(face_result_list) == 0:
return [] return list()
for candidate in face_result_list: for candidate in face_result_list:
should_add = True should_add = True
@ -210,7 +210,7 @@ def generate_face_box_mask(
# Check if any face is detected. # Check if any face is detected.
if results.multi_face_landmarks: # type: ignore # these are via protobuf and not typed if results.multi_face_landmarks: # type: ignore # these are via protobuf and not typed
# Search for the face_id in the detected faces. # Search for the face_id in the detected faces.
for _face_id, face_landmarks in enumerate(results.multi_face_landmarks): # type: ignore #these are via protobuf and not typed for face_id, face_landmarks in enumerate(results.multi_face_landmarks): # type: ignore #these are via protobuf and not typed
# Get the bounding box of the face mesh. # Get the bounding box of the face mesh.
x_coordinates = [landmark.x for landmark in face_landmarks.landmark] x_coordinates = [landmark.x for landmark in face_landmarks.landmark]
y_coordinates = [landmark.y for landmark in face_landmarks.landmark] y_coordinates = [landmark.y for landmark in face_landmarks.landmark]
@ -438,7 +438,7 @@ def get_faces_list(
return all_faces return all_faces
@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.1.0") @invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.0.2")
class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata): class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Bound, extract, and mask a face from an image using MediaPipe detection""" """Bound, extract, and mask a face from an image using MediaPipe detection"""
@ -532,7 +532,7 @@ class FaceOffInvocation(BaseInvocation, WithWorkflow, WithMetadata):
return output return output
@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.1.0") @invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.0.2")
class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata): class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Face mask creation using mediapipe face detection""" """Face mask creation using mediapipe face detection"""
@ -650,7 +650,7 @@ class FaceMaskInvocation(BaseInvocation, WithWorkflow, WithMetadata):
@invocation( @invocation(
"face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.1.0" "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.0.2"
) )
class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata): class FaceIdentifierInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Outputs an image with detected face IDs printed on each face. For use with other FaceTools.""" """Outputs an image with detected face IDs printed on each face. For use with other FaceTools."""

View File

@ -5,15 +5,23 @@ from typing import Literal, Optional
import cv2 import cv2
import numpy import numpy
from PIL import Image, ImageChops, ImageFilter, ImageOps from PIL import Image, ImageChops, ImageFilter, ImageOps, ImageDraw
from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker from invokeai.backend.image_util.safety_checker import SafetyChecker
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation from .baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
WithMetadata,
WithWorkflow,
invocation,
)
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0") @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")
@ -36,7 +44,7 @@ class ShowImageInvocation(BaseInvocation):
) )
@invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.1.0") @invocation("blank_image", title="Blank Image", tags=["image"], category="image", version="1.0.0")
class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Creates a blank image and forwards it to the pipeline""" """Creates a blank image and forwards it to the pipeline"""
@ -66,7 +74,135 @@ class BlankImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
) )
@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.1.0") @invocation("gradient_image", title="Gradient Image", tags=["gradient", "image"], category="image", version="1.0.0")
class GradientImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Creates a variety of gradient images and forwards them to the pipeline"""
width: int = InputField(default=512, description="The width of the image")
height: int = InputField(default=512, description="The height of the image")
mode: Literal["linear", "radial", "noise", "conical", "diamond"] = InputField(
default="linear", description="The type of gradient"
)
color1: ColorField = InputField(
default=ColorField(r=0, g=0, b=0, a=255), description="The starting color of the gradient"
)
color2: ColorField = InputField(
default=ColorField(r=255, g=255, b=255, a=255), description="The ending color of the gradient"
)
orientation_angle: float = InputField(default=0.0, description="The orientation angle of the gradient in degrees")
def _generate_linear_gradient(self) -> Image.Image:
image = Image.new("RGBA", (self.width, self.height), self.color1.tuple())
draw = ImageDraw.Draw(image)
for i in range(self.width):
blended_color = (
int(self.color1.r + (self.color2.r - self.color1.r) * (i / self.width)),
int(self.color1.g + (self.color2.g - self.color1.g) * (i / self.width)),
int(self.color1.b + (self.color2.b - self.color1.b) * (i / self.width)),
int(self.color1.a + (self.color2.a - self.color1.a) * (i / self.width)),
)
draw.line([(i, 0), (i, self.height)], fill=blended_color)
image = image.rotate(self.orientation_angle, expand=True)
return image
def _generate_radial_gradient(self) -> Image.Image:
image = Image.new("RGBA", (self.width, self.height), self.color1.tuple())
draw = ImageDraw.Draw(image)
max_radius = int(((self.width**2) + (self.height**2)) ** 0.5 / 2)
center_x, center_y = self.width // 2, self.height // 2
for r in range(max_radius):
blended_color = (
int(self.color1.r + (self.color2.r - self.color1.r) * (r / max_radius)),
int(self.color1.g + (self.color2.g - self.color1.g) * (r / max_radius)),
int(self.color1.b + (self.color2.b - self.color1.b) * (r / max_radius)),
int(self.color1.a + (self.color2.a - self.color1.a) * (r / max_radius)),
)
draw.ellipse((center_x - r, center_y - r, center_x + r, center_y + r), outline=blended_color, width=1)
return image
def _generate_noise_gradient(self) -> Image.Image:
img_array = numpy.zeros((self.height, self.width, 4), dtype=numpy.uint8)
random_factors = numpy.random.rand(self.height, self.width, 4)
for i, color in enumerate(["r", "g", "b", "a"]):
img_array[..., i] = (
getattr(self.color1, color)
+ (getattr(self.color2, color) - getattr(self.color1, color)) * random_factors[..., i]
)
image = Image.fromarray(img_array.astype("uint8"), "RGBA")
return image
def _generate_conical_gradient(self) -> Image.Image:
image = Image.new("RGBA", (self.width, self.height))
pixels = image.load()
center_x, center_y = self.width // 2, self.height // 2
for x in range(self.width):
for y in range(self.height):
angle = int((180 / numpy.pi) * numpy.arctan2(y - center_y, x - center_x)) % 360
blended_color = (
int(self.color1.r + (self.color2.r - self.color1.r) * (angle / 360)),
int(self.color1.g + (self.color2.g - self.color1.g) * (angle / 360)),
int(self.color1.b + (self.color2.b - self.color1.b) * (angle / 360)),
int(self.color1.a + (self.color2.a - self.color1.a) * (angle / 360)),
)
pixels[x, y] = blended_color
return image
def _generate_diamond_gradient(self) -> Image.Image:
image = Image.new("RGBA", (self.width, self.height))
pixels = image.load()
center_x, center_y = self.width // 2, self.height // 2
for x in range(self.width):
for y in range(self.height):
distance = abs(x - center_x) + abs(y - center_y)
max_distance = self.width // 2 + self.height // 2
blended_color = (
int(self.color1.r + (self.color2.r - self.color1.r) * (distance / max_distance)),
int(self.color1.g + (self.color2.g - self.color1.g) * (distance / max_distance)),
int(self.color1.b + (self.color2.b - self.color1.b) * (distance / max_distance)),
int(self.color1.a + (self.color2.a - self.color1.a) * (distance / max_distance)),
)
pixels[x, y] = blended_color
return image
def invoke(self, context: InvocationContext) -> ImageOutput:
gradient_method_map = {
"linear": self._generate_linear_gradient,
"radial": self._generate_radial_gradient,
"noise": self._generate_noise_gradient,
"conical": self._generate_conical_gradient,
"diamond": self._generate_diamond_gradient,
}
image = gradient_method_map.get(self.mode)()
image_dto = context.services.images.create(
image=image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate,
metadata=self.metadata,
workflow=self.workflow,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)
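The gradient node above builds each gradient pixel-by-pixel with `ImageDraw`. As a sanity check of the underlying blend math, independent of the invocation framework, here is a minimal NumPy sketch of the same linear interpolation; the function name and signature are illustrative only and are not part of the node's API.

```python
# Minimal sketch of the node's linear-blend math, vectorized with NumPy instead
# of per-column ImageDraw lines. Names here are illustrative, not the node's API.
import numpy as np
from PIL import Image


def linear_gradient_rgba(width: int, height: int, color1: tuple, color2: tuple, angle: float = 0.0) -> Image.Image:
    t = np.linspace(0.0, 1.0, width, dtype=np.float32)[None, :, None]  # blend factor per column, shape (1, W, 1)
    c1 = np.asarray(color1, dtype=np.float32)  # RGBA start color
    c2 = np.asarray(color2, dtype=np.float32)  # RGBA end color
    row = c1 + (c2 - c1) * t                   # blended colors for one row, shape (1, W, 4)
    pixels = np.repeat(row, height, axis=0).astype(np.uint8)  # stack the row to (H, W, 4)
    image = Image.fromarray(pixels, "RGBA")
    # The node rotates after generating and expands the canvas; mirror that here.
    return image.rotate(angle, expand=True)


# Example: 512x512 black-to-white horizontal gradient, matching the node's defaults.
gradient = linear_gradient_rgba(512, 512, (0, 0, 0, 255), (255, 255, 255, 255))
```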
@invocation("img_crop", title="Crop Image", tags=["image", "crop"], category="image", version="1.0.0")
class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Crops an image to a specified box. The box can be outside of the image.""" """Crops an image to a specified box. The box can be outside of the image."""
@ -100,7 +236,7 @@ class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.1.0") @invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1")
class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Pastes an image into another image.""" """Pastes an image into another image."""
@ -154,7 +290,7 @@ class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.1.0") @invocation("tomask", title="Mask from Alpha", tags=["image", "mask"], category="image", version="1.0.0")
class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata): class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Extracts the alpha channel of an image as a mask.""" """Extracts the alpha channel of an image as a mask."""
@ -186,7 +322,7 @@ class MaskFromAlphaInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.1.0") @invocation("img_mul", title="Multiply Images", tags=["image", "multiply"], category="image", version="1.0.0")
class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Multiplies two images together using `PIL.ImageChops.multiply()`.""" """Multiplies two images together using `PIL.ImageChops.multiply()`."""
@ -220,7 +356,7 @@ class ImageMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
IMAGE_CHANNELS = Literal["A", "R", "G", "B"] IMAGE_CHANNELS = Literal["A", "R", "G", "B"]
@invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.1.0") @invocation("img_chan", title="Extract Image Channel", tags=["image", "channel"], category="image", version="1.0.0")
class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Gets a channel from an image.""" """Gets a channel from an image."""
@ -253,7 +389,7 @@ class ImageChannelInvocation(BaseInvocation, WithWorkflow, WithMetadata):
IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
@invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.1.0") @invocation("img_conv", title="Convert Image Mode", tags=["image", "convert"], category="image", version="1.0.0")
class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Converts an image to a different mode.""" """Converts an image to a different mode."""
@ -283,7 +419,7 @@ class ImageConvertInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.1.0") @invocation("img_blur", title="Blur Image", tags=["image", "blur"], category="image", version="1.0.0")
class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageBlurInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Blurs an image""" """Blurs an image"""
@ -338,7 +474,7 @@ PIL_RESAMPLING_MAP = {
} }
@invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.1.0") @invocation("img_resize", title="Resize Image", tags=["image", "resize"], category="image", version="1.0.0")
class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow): class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Resizes an image to specific dimensions""" """Resizes an image to specific dimensions"""
@ -375,7 +511,7 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata, WithWorkflow):
) )
@invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.1.0") @invocation("img_scale", title="Scale Image", tags=["image", "scale"], category="image", version="1.0.0")
class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow): class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Scales an image by a factor""" """Scales an image by a factor"""
@ -417,7 +553,7 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata, WithWorkflow):
) )
@invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.1.0") @invocation("img_lerp", title="Lerp Image", tags=["image", "lerp"], category="image", version="1.0.0")
class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Linear interpolation of all pixels of an image""" """Linear interpolation of all pixels of an image"""
@ -451,7 +587,7 @@ class ImageLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.1.0") @invocation("img_ilerp", title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", version="1.0.0")
class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Inverse linear interpolation of all pixels of an image""" """Inverse linear interpolation of all pixels of an image"""
@ -485,7 +621,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.1.0") @invocation("img_nsfw", title="Blur NSFW Image", tags=["image", "nsfw"], category="image", version="1.0.0")
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow): class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Add blur to NSFW-flagged images""" """Add blur to NSFW-flagged images"""
@ -532,7 +668,7 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithWorkflow):
title="Add Invisible Watermark", title="Add Invisible Watermark",
tags=["image", "watermark"], tags=["image", "watermark"],
category="image", category="image",
version="1.1.0", version="1.0.0",
) )
class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow): class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Add an invisible watermark to an image""" """Add an invisible watermark to an image"""
@ -561,7 +697,7 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithWorkflow):
) )
@invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.1.0") @invocation("mask_edge", title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", version="1.0.0")
class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata): class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Applies an edge mask to an image""" """Applies an edge mask to an image"""
@ -612,7 +748,7 @@ class MaskEdgeInvocation(BaseInvocation, WithWorkflow, WithMetadata):
title="Combine Masks", title="Combine Masks",
tags=["image", "mask", "multiply"], tags=["image", "mask", "multiply"],
category="image", category="image",
version="1.1.0", version="1.0.0",
) )
class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata): class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`."""
@ -644,7 +780,7 @@ class MaskCombineInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.1.0") @invocation("color_correct", title="Color Correct", tags=["image", "color"], category="image", version="1.0.0")
class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
""" """
Shifts the colors of a target image to match the reference image, optionally Shifts the colors of a target image to match the reference image, optionally
@ -755,7 +891,7 @@ class ColorCorrectInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.1.0") @invocation("img_hue_adjust", title="Adjust Image Hue", tags=["image", "hue"], category="image", version="1.0.0")
class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageHueAdjustmentInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Adjusts the Hue of an image.""" """Adjusts the Hue of an image."""
@ -858,7 +994,7 @@ CHANNEL_FORMATS = {
"value", "value",
], ],
category="image", category="image",
version="1.1.0", version="1.0.0",
) )
class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Add or subtract a value from a specific color channel of an image.""" """Add or subtract a value from a specific color channel of an image."""
@ -929,7 +1065,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"value", "value",
], ],
category="image", category="image",
version="1.1.0", version="1.0.0",
) )
class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Scale a specific color channel of an image.""" """Scale a specific color channel of an image."""
@ -988,7 +1124,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithWorkflow, WithMetadata)
title="Save Image", title="Save Image",
tags=["primitives", "image"], tags=["primitives", "image"],
category="primitives", category="primitives",
version="1.1.0", version="1.0.1",
use_cache=False, use_cache=False,
) )
class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata): class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
@ -1017,35 +1153,3 @@ class SaveImageInvocation(BaseInvocation, WithWorkflow, WithMetadata):
width=image_dto.width, width=image_dto.width,
height=image_dto.height, height=image_dto.height,
) )
@invocation(
"linear_ui_output",
title="Linear UI Image Output",
tags=["primitives", "image"],
category="primitives",
version="1.0.1",
use_cache=False,
)
class LinearUIOutputInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Handles Linear UI Image Outputting tasks."""
image: ImageField = InputField(description=FieldDescriptions.image)
board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct)
def invoke(self, context: InvocationContext) -> ImageOutput:
image_dto = context.services.images.get_dto(self.image.image_name)
if self.board:
context.services.board_images.add_image_to_board(self.board.board_id, self.image.image_name)
if image_dto.is_intermediate != self.is_intermediate:
context.services.images.update(
self.image.image_name, changes=ImageRecordChanges(is_intermediate=self.is_intermediate)
)
return ImageOutput(
image=ImageField(image_name=self.image.image_name),
width=image_dto.width,
height=image_dto.height,
)

View File

@ -118,7 +118,7 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int]
return si return si
@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0") @invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata): class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image with a solid color""" """Infills transparent areas of an image with a solid color"""
@ -154,7 +154,7 @@ class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0") @invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata): class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image with tiles of the image""" """Infills transparent areas of an image with tiles of the image"""
@ -192,7 +192,7 @@ class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
@invocation( @invocation(
"infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0" "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0"
) )
class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata): class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using the PatchMatch algorithm""" """Infills transparent areas of an image using the PatchMatch algorithm"""
@ -245,7 +245,7 @@ class InfillPatchMatchInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0") @invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata): class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using the LaMa model""" """Infills transparent areas of an image using the LaMa model"""
@ -274,7 +274,7 @@ class LaMaInfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
) )
@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0") @invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint")
class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata): class CV2InfillInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Infills transparent areas of an image using OpenCV Inpainting""" """Infills transparent areas of an image using OpenCV Inpainting"""

View File

@ -7,6 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import ( from invokeai.app.invocations.baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -16,7 +17,6 @@ from invokeai.app.invocations.baseinvocation import (
invocation_output, invocation_output,
) )
from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType, ModelType from invokeai.backend.model_management.models.base import BaseModelType, ModelType
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id

View File

@ -10,7 +10,7 @@ import torch
import torchvision.transforms as T import torchvision.transforms as T
from diffusers import AutoencoderKL, AutoencoderTiny from diffusers import AutoencoderKL, AutoencoderTiny
from diffusers.image_processor import VaeImageProcessor from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import T2IAdapter from diffusers.models.adapter import FullAdapterXL, T2IAdapter
from diffusers.models.attention_processor import ( from diffusers.models.attention_processor import (
AttnProcessor2_0, AttnProcessor2_0,
LoRAAttnProcessor2_0, LoRAAttnProcessor2_0,
@ -34,7 +34,6 @@ from invokeai.app.invocations.primitives import (
) )
from invokeai.app.invocations.t2i_adapter import T2IAdapterField from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
@ -58,6 +57,7 @@ from ...backend.util.devices import choose_precision, choose_torch_device
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -77,7 +77,7 @@ if choose_torch_device() == torch.device("mps"):
DEFAULT_PRECISION = choose_precision(choose_torch_device()) DEFAULT_PRECISION = choose_precision(choose_torch_device())
SAMPLER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())] SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
@invocation_output("scheduler_output") @invocation_output("scheduler_output")
@ -562,6 +562,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapter_model: T2IAdapter t2i_adapter_model: T2IAdapter
with t2i_adapter_model_info as t2i_adapter_model: with t2i_adapter_model_info as t2i_adapter_model:
total_downscale_factor = t2i_adapter_model.total_downscale_factor total_downscale_factor = t2i_adapter_model.total_downscale_factor
if isinstance(t2i_adapter_model.adapter, FullAdapterXL):
# HACK(ryand): Work around a bug in FullAdapterXL. This is being addressed upstream in diffusers by
# this PR: https://github.com/huggingface/diffusers/pull/5134.
total_downscale_factor = total_downscale_factor // 2
# Resize the T2I-Adapter input image. # Resize the T2I-Adapter input image.
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the # We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
@ -707,11 +711,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
with ( with (
ExitStack() as exit_stack, ExitStack() as exit_stack,
ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),
ModelPatcher.apply_freeu(unet_info.context.model, self.unet.freeu_config),
set_seamless(unet_info.context.model, self.unet.seamless_axes), set_seamless(unet_info.context.model, self.unet.seamless_axes),
unet_info as unet, unet_info as unet,
# Apply the LoRA after unet has been moved to its target device for faster patching.
ModelPatcher.apply_lora_unet(unet, _lora_loader()),
): ):
latents = latents.to(device=unet.device, dtype=unet.dtype) latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None: if noise is not None:
@ -790,7 +791,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
title="Latents to Image", title="Latents to Image",
tags=["latents", "image", "vae", "l2i"], tags=["latents", "image", "vae", "l2i"],
category="latents", category="latents",
version="1.1.0", version="1.0.0",
) )
class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Generates an image from latents.""" """Generates an image from latents."""
@ -1105,7 +1106,7 @@ class BlendLatentsInvocation(BaseInvocation):
latents_b = context.services.latents.get(self.latents_b.latents_name) latents_b = context.services.latents.get(self.latents_b.latents_name)
if latents_a.shape != latents_b.shape: if latents_a.shape != latents_b.shape:
raise Exception("Latents to blend must be the same size.") raise "Latents to blend must be the same size."
# TODO: # TODO:
device = choose_torch_device() device = choose_torch_device()
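One side of the hunk above raises a proper `Exception`, the other a bare string. For reference only (not project code): raising a non-exception object fails with a `TypeError` in Python 3, so the size mismatch would never surface with the intended message.

```python
# Illustration only: Python 3 refuses to raise anything that is not a
# BaseException subclass or instance.
try:
    raise "Latents to blend must be the same size."  # noqa
except TypeError as err:
    print(err)  # exceptions must derive from BaseException

# The Exception form carries the message to the caller as intended.
try:
    raise Exception("Latents to blend must be the same size.")
except Exception as err:
    print(err)
```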

View File

@ -6,9 +6,8 @@ import numpy as np
from pydantic import ValidationInfo, field_validator from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0") @invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")
@ -145,17 +144,17 @@ INTEGER_OPERATIONS = Literal[
] ]
INTEGER_OPERATIONS_LABELS = { INTEGER_OPERATIONS_LABELS = dict(
"ADD": "Add A+B", ADD="Add A+B",
"SUB": "Subtract A-B", SUB="Subtract A-B",
"MUL": "Multiply A*B", MUL="Multiply A*B",
"DIV": "Divide A/B", DIV="Divide A/B",
"EXP": "Exponentiate A^B", EXP="Exponentiate A^B",
"MOD": "Modulus A%B", MOD="Modulus A%B",
"ABS": "Absolute Value of A", ABS="Absolute Value of A",
"MIN": "Minimum(A,B)", MIN="Minimum(A,B)",
"MAX": "Maximum(A,B)", MAX="Maximum(A,B)",
} )
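The two label mappings in this hunk differ only in construction style, `dict(ADD=...)` versus a literal. A quick illustration (not project code) of why the literal form is usually preferred: it also accepts keys that are not valid identifiers and skips the `dict` name lookup.

```python
# Both build the same mapping; the literal form additionally allows keys such
# as "A+B" that the keyword form cannot express.
labels_kw = dict(ADD="Add A+B", SUB="Subtract A-B")
labels_lit = {"ADD": "Add A+B", "SUB": "Subtract A-B"}
assert labels_kw == labels_lit
```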
@invocation( @invocation(
@ -183,8 +182,8 @@ class IntegerMathInvocation(BaseInvocation):
operation: INTEGER_OPERATIONS = InputField( operation: INTEGER_OPERATIONS = InputField(
default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS
) )
a: int = InputField(default=1, description=FieldDescriptions.num_1) a: int = InputField(default=0, description=FieldDescriptions.num_1)
b: int = InputField(default=1, description=FieldDescriptions.num_2) b: int = InputField(default=0, description=FieldDescriptions.num_2)
@field_validator("b") @field_validator("b")
def no_unrepresentable_results(cls, v: int, info: ValidationInfo): def no_unrepresentable_results(cls, v: int, info: ValidationInfo):
@ -231,17 +230,17 @@ FLOAT_OPERATIONS = Literal[
] ]
FLOAT_OPERATIONS_LABELS = { FLOAT_OPERATIONS_LABELS = dict(
"ADD": "Add A+B", ADD="Add A+B",
"SUB": "Subtract A-B", SUB="Subtract A-B",
"MUL": "Multiply A*B", MUL="Multiply A*B",
"DIV": "Divide A/B", DIV="Divide A/B",
"EXP": "Exponentiate A^B", EXP="Exponentiate A^B",
"ABS": "Absolute Value of A", ABS="Absolute Value of A",
"SQRT": "Square Root of A", SQRT="Square Root of A",
"MIN": "Minimum(A,B)", MIN="Minimum(A,B)",
"MAX": "Maximum(A,B)", MAX="Maximum(A,B)",
} )
@invocation( @invocation(
@ -257,8 +256,8 @@ class FloatMathInvocation(BaseInvocation):
operation: FLOAT_OPERATIONS = InputField( operation: FLOAT_OPERATIONS = InputField(
default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS
) )
a: float = InputField(default=1, description=FieldDescriptions.num_1) a: float = InputField(default=0, description=FieldDescriptions.num_1)
b: float = InputField(default=1, description=FieldDescriptions.num_2) b: float = InputField(default=0, description=FieldDescriptions.num_2)
@field_validator("b") @field_validator("b")
def no_unrepresentable_results(cls, v: float, info: ValidationInfo): def no_unrepresentable_results(cls, v: float, info: ValidationInfo):
@ -266,7 +265,7 @@ class FloatMathInvocation(BaseInvocation):
raise ValueError("Cannot divide by zero") raise ValueError("Cannot divide by zero")
elif info.data["operation"] == "EXP" and info.data["a"] == 0 and v < 0: elif info.data["operation"] == "EXP" and info.data["a"] == 0 and v < 0:
raise ValueError("Cannot raise zero to a negative power") raise ValueError("Cannot raise zero to a negative power")
elif info.data["operation"] == "EXP" and isinstance(info.data["a"] ** v, complex): elif info.data["operation"] == "EXP" and type(info.data["a"] ** v) is complex:
raise ValueError("Root operation resulted in a complex number") raise ValueError("Root operation resulted in a complex number")
return v return v
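The validator above guards the EXP operation against complex results. For reference (illustrative values only): a negative float raised to a fractional power in Python 3 silently returns a `complex` rather than raising, which is why the result-type check exists.

```python
# Illustrative values: fractional powers of negative floats are complex in Python 3.
a, b = -8.0, 0.5
result = a**b
print(type(result))                 # <class 'complex'>
print(isinstance(result, complex))  # True
```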

View File

@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import ( from invokeai.app.invocations.baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
InputField, InputField,
InvocationContext, InvocationContext,
MetadataField, MetadataField,
@ -18,7 +19,6 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.t2i_adapter import T2IAdapterField from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.shared.fields import FieldDescriptions
from ...version import __version__ from ...version import __version__
@ -112,7 +112,7 @@ GENERATION_MODES = Literal[
] ]
@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.1") @invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.0")
class CoreMetadataInvocation(BaseInvocation): class CoreMetadataInvocation(BaseInvocation):
"""Collects core generation metadata into a MetadataField""" """Collects core generation metadata into a MetadataField"""
@ -160,14 +160,13 @@ class CoreMetadataInvocation(BaseInvocation):
) )
# High resolution fix metadata. # High resolution fix metadata.
hrf_enabled: Optional[bool] = InputField( hrf_width: Optional[int] = InputField(
default=None, default=None,
description="Whether or not high resolution fix was enabled.", description="The high resolution fix height and width multipler.",
) )
# TODO: should this be stricter or do we just let the UI handle it? hrf_height: Optional[int] = InputField(
hrf_method: Optional[str] = InputField(
default=None, default=None,
description="The high resolution fix upscale method.", description="The high resolution fix height and width multipler.",
) )
hrf_strength: Optional[float] = InputField( hrf_strength: Optional[float] = InputField(
default=None, default=None,

View File

@ -3,13 +3,11 @@ from typing import List, Optional
from pydantic import BaseModel, ConfigDict, Field from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.shared.models import FreeUConfig
from ...backend.model_management import BaseModelType, ModelType, SubModelType from ...backend.model_management import BaseModelType, ModelType, SubModelType
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -38,7 +36,6 @@ class UNetField(BaseModel):
scheduler: ModelInfo = Field(description="Info to load scheduler submodel") scheduler: ModelInfo = Field(description="Info to load scheduler submodel")
loras: List[LoraInfo] = Field(description="Loras to apply on model loading") loras: List[LoraInfo] = Field(description="Loras to apply on model loading")
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration")
class ClipField(BaseModel): class ClipField(BaseModel):
@ -54,32 +51,13 @@ class VaeField(BaseModel):
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
@invocation_output("unet_output")
class UNetOutput(BaseInvocationOutput):
"""Base class for invocations that output a UNet field"""
unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@invocation_output("vae_output")
class VAEOutput(BaseInvocationOutput):
"""Base class for invocations that output a VAE field"""
vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
@invocation_output("clip_output")
class CLIPOutput(BaseInvocationOutput):
"""Base class for invocations that output a CLIP field"""
clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP")
@invocation_output("model_loader_output") @invocation_output("model_loader_output")
class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput): class ModelLoaderOutput(BaseInvocationOutput):
"""Model loader output""" """Model loader output"""
pass unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP")
vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
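One side of this hunk composes `ModelLoaderOutput` from separate `UNetOutput`, `CLIPOutput`, and `VAEOutput` classes via multiple inheritance. A minimal pydantic sketch of that pattern; the class and field names below are placeholders, not the project's.

```python
# Sketch of composing an output model from smaller pydantic models; the
# combined class inherits all fields and needs no body of its own.
from pydantic import BaseModel


class UNetPart(BaseModel):
    unet: str


class ClipPart(BaseModel):
    clip: str


class VaePart(BaseModel):
    vae: str


class CombinedOutput(UNetPart, ClipPart, VaePart):
    pass


combined = CombinedOutput(unet="u", clip="c", vae="v")
print(combined.model_dump())  # contains all three fields: unet, clip, vae
```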
class MainModelField(BaseModel): class MainModelField(BaseModel):
@ -388,6 +366,13 @@ class VAEModelField(BaseModel):
model_config = ConfigDict(protected_namespaces=()) model_config = ConfigDict(protected_namespaces=())
@invocation_output("vae_loader_output")
class VaeLoaderOutput(BaseInvocationOutput):
"""VAE output"""
vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0") @invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0")
class VaeLoaderInvocation(BaseInvocation): class VaeLoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput""" """Loads a VAE model, outputting a VaeLoaderOutput"""
@ -399,7 +384,7 @@ class VaeLoaderInvocation(BaseInvocation):
title="VAE", title="VAE",
) )
def invoke(self, context: InvocationContext) -> VAEOutput: def invoke(self, context: InvocationContext) -> VaeLoaderOutput:
base_model = self.vae_model.base_model base_model = self.vae_model.base_model
model_name = self.vae_model.model_name model_name = self.vae_model.model_name
model_type = ModelType.Vae model_type = ModelType.Vae
@ -410,7 +395,7 @@ class VaeLoaderInvocation(BaseInvocation):
model_type=model_type, model_type=model_type,
): ):
raise Exception(f"Unkown vae name: {model_name}!") raise Exception(f"Unkown vae name: {model_name}!")
return VAEOutput( return VaeLoaderOutput(
vae=VaeField( vae=VaeField(
vae=ModelInfo( vae=ModelInfo(
model_name=model_name, model_name=model_name,
@ -472,24 +457,3 @@ class SeamlessModeInvocation(BaseInvocation):
vae.seamless_axes = seamless_axes_list vae.seamless_axes = seamless_axes_list
return SeamlessModeOutput(unet=unet, vae=vae) return SeamlessModeOutput(unet=unet, vae=vae)
@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.0")
class FreeUInvocation(BaseInvocation):
"""
Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):
SD1.5: 1.2/1.4/0.9/0.2,
SD2: 1.1/1.2/0.9/0.2,
SDXL: 1.1/1.2/0.6/0.4,
"""
unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet")
b1: float = InputField(default=1.2, ge=-1, le=3, description=FieldDescriptions.freeu_b1)
b2: float = InputField(default=1.4, ge=-1, le=3, description=FieldDescriptions.freeu_b2)
s1: float = InputField(default=0.9, ge=-1, le=3, description=FieldDescriptions.freeu_s1)
s2: float = InputField(default=0.2, ge=-1, le=3, description=FieldDescriptions.freeu_s2)
def invoke(self, context: InvocationContext) -> UNetOutput:
self.unet.freeu_config = FreeUConfig(s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2)
return UNetOutput(unet=self.unet)
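For context on the `FreeUInvocation` block present on only one side of this diff: FreeU rescales the UNet's backbone (`b1`/`b2`) and skip-connection (`s1`/`s2`) features at inference time, with no retraining. A hedged sketch of the same idea on a plain diffusers pipeline, assuming a diffusers release that ships `enable_freeu`; the model ID and prompt are placeholders.

```python
# Sketch only: applying FreeU via diffusers' built-in hook, using the SD 1.5
# values suggested in the invocation's docstring (b1=1.2, b2=1.4, s1=0.9, s2=0.2).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
image = pipe("a photo of a lighthouse at dusk").images[0]
```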

View File

@ -5,13 +5,13 @@ import torch
from pydantic import field_validator from pydantic import field_validator
from invokeai.app.invocations.latent import LatentsField from invokeai.app.invocations.latent import LatentsField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import SEED_MAX, get_random_seed from invokeai.app.util.misc import SEED_MAX, get_random_seed
from ...backend.util.devices import choose_torch_device, torch_dtype from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
InputField, InputField,
InvocationContext, InvocationContext,
OutputField, OutputField,

View File

@ -14,7 +14,6 @@ from tqdm import tqdm
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType, ModelType, SubModelType from invokeai.backend import BaseModelType, ModelType, SubModelType
@ -24,6 +23,7 @@ from ...backend.util import choose_torch_device
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -54,7 +54,7 @@ ORT_TO_NP_TYPE = {
"tensor(double)": np.float64, "tensor(double)": np.float64,
} }
PRECISION_VALUES = Literal[tuple(ORT_TO_NP_TYPE.keys())] PRECISION_VALUES = Literal[tuple(list(ORT_TO_NP_TYPE.keys()))]
@invocation("prompt_onnx", title="ONNX Prompt (Raw)", tags=["prompt", "onnx"], category="conditioning", version="1.0.0") @invocation("prompt_onnx", title="ONNX Prompt (Raw)", tags=["prompt", "onnx"], category="conditioning", version="1.0.0")
@ -252,7 +252,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
scheduler.set_timesteps(self.steps) scheduler.set_timesteps(self.steps)
latents = latents * np.float64(scheduler.init_noise_sigma) latents = latents * np.float64(scheduler.init_noise_sigma)
extra_step_kwargs = {} extra_step_kwargs = dict()
if "eta" in set(inspect.signature(scheduler.step).parameters.keys()): if "eta" in set(inspect.signature(scheduler.step).parameters.keys()):
extra_step_kwargs.update( extra_step_kwargs.update(
eta=0.0, eta=0.0,
@ -326,7 +326,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
title="ONNX Latents to Image", title="ONNX Latents to Image",
tags=["latents", "image", "vae", "onnx"], tags=["latents", "image", "vae", "onnx"],
category="image", category="image",
version="1.1.0", version="1.0.0",
) )
class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow): class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata, WithWorkflow):
"""Generates an image from latents.""" """Generates an image from latents."""

View File

@ -100,7 +100,7 @@ EASING_FUNCTIONS_MAP = {
"BounceInOut": BounceEaseInOut, "BounceInOut": BounceEaseInOut,
} }
EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())] EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))]
# actually I think for now could just use CollectionOutput (which is list[Any] # actually I think for now could just use CollectionOutput (which is list[Any]
@ -161,7 +161,7 @@ class StepParamEasingInvocation(BaseInvocation):
easing_class = EASING_FUNCTIONS_MAP[self.easing] easing_class = EASING_FUNCTIONS_MAP[self.easing]
if log_diagnostics: if log_diagnostics:
context.services.logger.debug("easing class: " + str(easing_class)) context.services.logger.debug("easing class: " + str(easing_class))
easing_list = [] easing_list = list()
if self.mirror: # "expected" mirroring if self.mirror: # "expected" mirroring
# if number of steps is even, squeeze duration down to (number_of_steps)/2 # if number of steps is even, squeeze duration down to (number_of_steps)/2
# and create reverse copy of list to append # and create reverse copy of list to append
@ -178,7 +178,7 @@ class StepParamEasingInvocation(BaseInvocation):
end=self.end_value, end=self.end_value,
duration=base_easing_duration - 1, duration=base_easing_duration - 1,
) )
base_easing_vals = [] base_easing_vals = list()
for step_index in range(base_easing_duration): for step_index in range(base_easing_duration):
easing_val = easing_function.ease(step_index) easing_val = easing_function.ease(step_index)
base_easing_vals.append(easing_val) base_easing_vals.append(easing_val)

View File

@ -5,11 +5,10 @@ from typing import Optional, Tuple
import torch import torch
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -294,7 +293,7 @@ class DenoiseMaskField(BaseModel):
"""An inpaint mask field""" """An inpaint mask field"""
mask_name: str = Field(description="The name of the mask image") mask_name: str = Field(description="The name of the mask image")
masked_latents_name: Optional[str] = Field(default=None, description="The name of the masked image latents") masked_latents_name: Optional[str] = Field(description="The name of the masked image latents")
@invocation_output("denoise_mask_output") @invocation_output("denoise_mask_output")

View File

@ -1,9 +1,8 @@
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import ModelType, SubModelType from ...backend.model_management import ModelType, SubModelType
from .baseinvocation import ( from .baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,

View File

@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import ( from invokeai.app.invocations.baseinvocation import (
BaseInvocation, BaseInvocation,
BaseInvocationOutput, BaseInvocationOutput,
FieldDescriptions,
Input, Input,
InputField, InputField,
InvocationContext, InvocationContext,
@ -15,7 +16,6 @@ from invokeai.app.invocations.baseinvocation import (
) )
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType from invokeai.backend.model_management.models.base import BaseModelType

View File

@ -29,7 +29,7 @@ if choose_torch_device() == torch.device("mps"):
from torch import mps from torch import mps
@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.2.0") @invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.1.0")
class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata): class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):
"""Upscales an image using RealESRGAN.""" """Upscales an image using RealESRGAN."""

View File

@ -139,7 +139,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
(board_id,), (board_id,),
) )
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result] images = list(map(lambda r: deserialize_image_record(dict(r)), result))
self._cursor.execute( self._cursor.execute(
"""--sql """--sql
@ -167,7 +167,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
(board_id,), (board_id,),
) )
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result] image_names = list(map(lambda r: r[0], result))
return image_names return image_names
except sqlite3.Error as e: except sqlite3.Error as e:
self._conn.rollback() self._conn.rollback()
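The two storage hunks above swap `list(map(lambda ...))` for list comprehensions, the rewrite that linters such as ruff and flake8-comprehensions typically suggest. An equivalence check with illustrative data:

```python
# Both forms produce the same list; the comprehension avoids the lambda and map().
rows = [("img_1.png",), ("img_2.png",)]
via_map = list(map(lambda r: r[0], rows))
via_comprehension = [r[0] for r in rows]
assert via_map == via_comprehension == ["img_1.png", "img_2.png"]
```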

View File

@ -199,7 +199,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
) )
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result] boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
# Get the total number of boards # Get the total number of boards
self._cursor.execute( self._cursor.execute(
@ -236,7 +236,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
) )
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result] boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
return boards return boards

View File

@ -55,7 +55,7 @@ class InvokeAISettings(BaseSettings):
""" """
cls = self.__class__ cls = self.__class__
type = get_args(get_type_hints(cls)["type"])[0] type = get_args(get_type_hints(cls)["type"])[0]
field_dict = {type: {}} field_dict = dict({type: dict()})
for name, field in self.model_fields.items(): for name, field in self.model_fields.items():
if name in cls._excluded_from_yaml(): if name in cls._excluded_from_yaml():
continue continue
@ -64,7 +64,7 @@ class InvokeAISettings(BaseSettings):
) )
value = getattr(self, name) value = getattr(self, name)
if category not in field_dict[type]: if category not in field_dict[type]:
field_dict[type][category] = {} field_dict[type][category] = dict()
# keep paths as strings to make it easier to read # keep paths as strings to make it easier to read
field_dict[type][category][name] = str(value) if isinstance(value, Path) else value field_dict[type][category][name] = str(value) if isinstance(value, Path) else value
conf = OmegaConf.create(field_dict) conf = OmegaConf.create(field_dict)
@ -89,7 +89,7 @@ class InvokeAISettings(BaseSettings):
# create an upcase version of the environment in # create an upcase version of the environment in
# order to achieve case-insensitive environment # order to achieve case-insensitive environment
# variables (the way Windows does) # variables (the way Windows does)
upcase_environ = {} upcase_environ = dict()
for key, value in os.environ.items(): for key, value in os.environ.items():
upcase_environ[key.upper()] = value upcase_environ[key.upper()] = value

View File

@ -45,7 +45,6 @@ InvokeAI:
ram: 13.5 ram: 13.5
vram: 0.25 vram: 0.25
lazy_offload: true lazy_offload: true
log_memory_usage: false
Device: Device:
device: auto device: auto
precision: auto precision: auto
@ -188,18 +187,18 @@ DEFAULT_MAX_VRAM = 0.5
class Categories(object): class Categories(object):
WebServer = {"category": "Web Server"} WebServer = dict(category="Web Server")
Features = {"category": "Features"} Features = dict(category="Features")
Paths = {"category": "Paths"} Paths = dict(category="Paths")
Logging = {"category": "Logging"} Logging = dict(category="Logging")
Development = {"category": "Development"} Development = dict(category="Development")
Other = {"category": "Other"} Other = dict(category="Other")
ModelCache = {"category": "Model Cache"} ModelCache = dict(category="Model Cache")
Device = {"category": "Device"} Device = dict(category="Device")
Generation = {"category": "Generation"} Generation = dict(category="Generation")
Queue = {"category": "Queue"} Queue = dict(category="Queue")
Nodes = {"category": "Nodes"} Nodes = dict(category="Nodes")
MemoryPerformance = {"category": "Memory/Performance"} MemoryPerformance = dict(category="Memory/Performance")
class InvokeAIAppConfig(InvokeAISettings): class InvokeAIAppConfig(InvokeAISettings):
@ -262,7 +261,6 @@ class InvokeAIAppConfig(InvokeAISettings):
ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )
log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
# DEVICE # DEVICE
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
@ -482,7 +480,7 @@ def _find_root() -> Path:
venv = Path(os.environ.get("VIRTUAL_ENV") or ".") venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
if os.environ.get("INVOKEAI_ROOT"): if os.environ.get("INVOKEAI_ROOT"):
root = Path(os.environ["INVOKEAI_ROOT"]) root = Path(os.environ["INVOKEAI_ROOT"])
elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]): elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
root = (venv.parent).resolve() root = (venv.parent).resolve()
else: else:
root = Path("~/invokeai").expanduser().resolve() root = Path("~/invokeai").expanduser().resolve()

View File

@ -27,7 +27,7 @@ class EventServiceBase:
payload["timestamp"] = get_timestamp() payload["timestamp"] = get_timestamp()
self.dispatch( self.dispatch(
event_name=EventServiceBase.queue_event, event_name=EventServiceBase.queue_event,
payload={"event": event_name, "data": payload}, payload=dict(event=event_name, data=payload),
) )
# Define events here for every event in the system. # Define events here for every event in the system.
@ -48,18 +48,18 @@ class EventServiceBase:
"""Emitted when there is generation progress""" """Emitted when there is generation progress"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="generator_progress", event_name="generator_progress",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"node_id": node.get("id"), node_id=node.get("id"),
"source_node_id": source_node_id, source_node_id=source_node_id,
"progress_image": progress_image.model_dump() if progress_image is not None else None, progress_image=progress_image.model_dump() if progress_image is not None else None,
"step": step, step=step,
"order": order, order=order,
"total_steps": total_steps, total_steps=total_steps,
}, ),
) )
def emit_invocation_complete( def emit_invocation_complete(
@ -75,15 +75,15 @@ class EventServiceBase:
"""Emitted when an invocation has completed""" """Emitted when an invocation has completed"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="invocation_complete", event_name="invocation_complete",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"node": node, node=node,
"source_node_id": source_node_id, source_node_id=source_node_id,
"result": result, result=result,
}, ),
) )
def emit_invocation_error( def emit_invocation_error(
@ -100,16 +100,16 @@ class EventServiceBase:
"""Emitted when an invocation has completed""" """Emitted when an invocation has completed"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="invocation_error", event_name="invocation_error",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"node": node, node=node,
"source_node_id": source_node_id, source_node_id=source_node_id,
"error_type": error_type, error_type=error_type,
"error": error, error=error,
}, ),
) )
def emit_invocation_started( def emit_invocation_started(
@ -124,14 +124,14 @@ class EventServiceBase:
"""Emitted when an invocation has started""" """Emitted when an invocation has started"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="invocation_started", event_name="invocation_started",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"node": node, node=node,
"source_node_id": source_node_id, source_node_id=source_node_id,
}, ),
) )
def emit_graph_execution_complete( def emit_graph_execution_complete(
@ -140,12 +140,12 @@ class EventServiceBase:
"""Emitted when a session has completed all invocations""" """Emitted when a session has completed all invocations"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="graph_execution_state_complete", event_name="graph_execution_state_complete",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
}, ),
) )
def emit_model_load_started( def emit_model_load_started(
@ -162,16 +162,16 @@ class EventServiceBase:
"""Emitted when a model is requested""" """Emitted when a model is requested"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="model_load_started", event_name="model_load_started",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"model_name": model_name, model_name=model_name,
"base_model": base_model, base_model=base_model,
"model_type": model_type, model_type=model_type,
"submodel": submodel, submodel=submodel,
}, ),
) )
def emit_model_load_completed( def emit_model_load_completed(
@ -189,19 +189,19 @@ class EventServiceBase:
"""Emitted when a model is correctly loaded (returns model info)""" """Emitted when a model is correctly loaded (returns model info)"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="model_load_completed", event_name="model_load_completed",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"model_name": model_name, model_name=model_name,
"base_model": base_model, base_model=base_model,
"model_type": model_type, model_type=model_type,
"submodel": submodel, submodel=submodel,
"hash": model_info.hash, hash=model_info.hash,
"location": str(model_info.location), location=str(model_info.location),
"precision": str(model_info.precision), precision=str(model_info.precision),
}, ),
) )
def emit_session_retrieval_error( def emit_session_retrieval_error(
@ -216,14 +216,14 @@ class EventServiceBase:
"""Emitted when session retrieval fails""" """Emitted when session retrieval fails"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="session_retrieval_error", event_name="session_retrieval_error",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"error_type": error_type, error_type=error_type,
"error": error, error=error,
}, ),
) )
def emit_invocation_retrieval_error( def emit_invocation_retrieval_error(
@ -239,15 +239,15 @@ class EventServiceBase:
"""Emitted when invocation retrieval fails""" """Emitted when invocation retrieval fails"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="invocation_retrieval_error", event_name="invocation_retrieval_error",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
"node_id": node_id, node_id=node_id,
"error_type": error_type, error_type=error_type,
"error": error, error=error,
}, ),
) )
def emit_session_canceled( def emit_session_canceled(
@ -260,12 +260,12 @@ class EventServiceBase:
"""Emitted when a session is canceled""" """Emitted when a session is canceled"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="session_canceled", event_name="session_canceled",
payload={ payload=dict(
"queue_id": queue_id, queue_id=queue_id,
"queue_item_id": queue_item_id, queue_item_id=queue_item_id,
"queue_batch_id": queue_batch_id, queue_batch_id=queue_batch_id,
"graph_execution_state_id": graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
}, ),
) )
def emit_queue_item_status_changed( def emit_queue_item_status_changed(
@ -277,39 +277,39 @@ class EventServiceBase:
"""Emitted when a queue item's status changes""" """Emitted when a queue item's status changes"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="queue_item_status_changed", event_name="queue_item_status_changed",
payload={ payload=dict(
"queue_id": queue_status.queue_id, queue_id=queue_status.queue_id,
"queue_item": { queue_item=dict(
"queue_id": session_queue_item.queue_id, queue_id=session_queue_item.queue_id,
"item_id": session_queue_item.item_id, item_id=session_queue_item.item_id,
"status": session_queue_item.status, status=session_queue_item.status,
"batch_id": session_queue_item.batch_id, batch_id=session_queue_item.batch_id,
"session_id": session_queue_item.session_id, session_id=session_queue_item.session_id,
"error": session_queue_item.error, error=session_queue_item.error,
"created_at": str(session_queue_item.created_at) if session_queue_item.created_at else None, created_at=str(session_queue_item.created_at) if session_queue_item.created_at else None,
"updated_at": str(session_queue_item.updated_at) if session_queue_item.updated_at else None, updated_at=str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None, started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None,
"completed_at": str(session_queue_item.completed_at) if session_queue_item.completed_at else None, completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
}, ),
"batch_status": batch_status.model_dump(), batch_status=batch_status.model_dump(),
"queue_status": queue_status.model_dump(), queue_status=queue_status.model_dump(),
}, ),
) )
def emit_batch_enqueued(self, enqueue_result: EnqueueBatchResult) -> None: def emit_batch_enqueued(self, enqueue_result: EnqueueBatchResult) -> None:
"""Emitted when a batch is enqueued""" """Emitted when a batch is enqueued"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="batch_enqueued", event_name="batch_enqueued",
payload={ payload=dict(
"queue_id": enqueue_result.queue_id, queue_id=enqueue_result.queue_id,
"batch_id": enqueue_result.batch.batch_id, batch_id=enqueue_result.batch.batch_id,
"enqueued": enqueue_result.enqueued, enqueued=enqueue_result.enqueued,
}, ),
) )
def emit_queue_cleared(self, queue_id: str) -> None: def emit_queue_cleared(self, queue_id: str) -> None:
"""Emitted when the queue is cleared""" """Emitted when the queue is cleared"""
self.__emit_queue_event( self.__emit_queue_event(
event_name="queue_cleared", event_name="queue_cleared",
payload={"queue_id": queue_id}, payload=dict(queue_id=queue_id),
) )
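The hunks above switch the event payloads between `dict(...)` keyword construction and literal `{...}` syntax. A minimal sketch of the literal-dict form, using placeholder event and field names rather than the project's actual API:

```python
# Illustrative only: the event name and fields are placeholders, not InvokeAI's real schema.
from typing import Any


def build_queue_event(event_name: str, **fields: Any) -> dict[str, Any]:
    # A dict literal reads the same as dict(...) but also permits keys that
    # are not valid Python identifiers.
    return {"event": event_name, "data": fields}


if __name__ == "__main__":
    payload = build_queue_event("generator_progress", queue_id="default", step=3, total_steps=20)
    print(payload)
```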

View File

@ -25,7 +25,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
__invoker: Invoker __invoker: Invoker
def __init__(self, output_folder: Union[str, Path]): def __init__(self, output_folder: Union[str, Path]):
self.__cache = {} self.__cache = dict()
self.__cache_ids = Queue() self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config self.__max_cache_size = 10 # TODO: get this from config
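The storage class in this hunk pairs a plain dict with a FIFO `Queue` of keys to form a small bounded cache. A self-contained sketch of that pattern, with an illustrative eviction helper rather than the service's real method names:

```python
from queue import Queue
from typing import Any

MAX_CACHE_SIZE = 10  # mirrors the hard-coded limit above

cache: dict[str, Any] = {}
cache_ids: Queue = Queue()


def set_cache(name: str, item: Any) -> None:
    # Insert, then evict the oldest key once the cap is exceeded.
    if name not in cache:
        cache[name] = item
        cache_ids.put(name)
        if cache_ids.qsize() > MAX_CACHE_SIZE:
            cache.pop(cache_ids.get(), None)


for i in range(15):
    set_cache(f"image_{i}.png", object())

print(len(cache))  # 10 -- the five oldest entries were evicted
```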

View File

@ -90,23 +90,25 @@ class ImageRecordDeleteException(Exception):
IMAGE_DTO_COLS = ", ".join( IMAGE_DTO_COLS = ", ".join(
[ list(
"images." + c map(
for c in [ lambda c: "images." + c,
"image_name", [
"image_origin", "image_name",
"image_category", "image_origin",
"width", "image_category",
"height", "width",
"session_id", "height",
"node_id", "session_id",
"is_intermediate", "node_id",
"created_at", "is_intermediate",
"updated_at", "created_at",
"deleted_at", "updated_at",
"starred", "deleted_at",
] "starred",
] ],
)
)
) )
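This hunk toggles the column-prefixing code between a `map`/`lambda` pipeline and a comprehension. Both produce the same SQL column string; a short sketch with a trimmed column list:

```python
# Trimmed column list for illustration; the full list is shown in the hunk above.
COLUMNS = ["image_name", "width", "height", "created_at"]

dto_cols = ", ".join("images." + c for c in COLUMNS)
dto_cols_via_map = ", ".join(map(lambda c: "images." + c, COLUMNS))

assert dto_cols == dto_cols_via_map
print(dto_cols)  # images.image_name, images.width, images.height, images.created_at
```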

View File

@ -263,7 +263,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
if categories is not None: if categories is not None:
# Convert the enum values to unique list of strings # Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)] category_strings = list(map(lambda c: c.value, set(categories)))
# Create the correct length of placeholders # Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings)) placeholders = ",".join("?" * len(category_strings))
@ -307,7 +307,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Build the list of images, deserializing each row # Build the list of images, deserializing each row
self._cursor.execute(images_query, images_params) self._cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result] images = list(map(lambda r: deserialize_image_record(dict(r)), result))
# Set up and execute the count query, without pagination # Set up and execute the count query, without pagination
count_query += query_conditions + ";" count_query += query_conditions + ";"
@ -386,7 +386,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
""" """
) )
result = cast(list[sqlite3.Row], self._cursor.fetchall()) result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result] image_names = list(map(lambda r: r[0], result))
self._cursor.execute( self._cursor.execute(
"""--sql """--sql
DELETE FROM images DELETE FROM images

View File

@ -21,8 +21,8 @@ class ImageServiceABC(ABC):
_on_deleted_callbacks: list[Callable[[str], None]] _on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None: def __init__(self) -> None:
self._on_changed_callbacks = [] self._on_changed_callbacks = list()
self._on_deleted_callbacks = [] self._on_deleted_callbacks = list()
def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None: def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None:
"""Register a callback for when an image is changed""" """Register a callback for when an image is changed"""

View File

@ -217,16 +217,18 @@ class ImageService(ImageServiceABC):
board_id, board_id,
) )
image_dtos = [ image_dtos = list(
image_record_to_dto( map(
image_record=r, lambda r: image_record_to_dto(
image_url=self.__invoker.services.urls.get_image_url(r.image_name), image_record=r,
thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True), image_url=self.__invoker.services.urls.get_image_url(r.image_name),
board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name), thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name), board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
),
results.items,
) )
for r in results.items )
]
return OffsetPaginatedResults[ImageDTO]( return OffsetPaginatedResults[ImageDTO](
items=image_dtos, items=image_dtos,

View File

@ -1,5 +1,5 @@
from abc import ABC from abc import ABC
class InvocationProcessorABC(ABC): # noqa: B024 class InvocationProcessorABC(ABC):
pass pass

View File

@ -26,7 +26,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
self.__invoker_thread = Thread( self.__invoker_thread = Thread(
name="invoker_processor", name="invoker_processor",
target=self.__process, target=self.__process,
kwargs={"stop_event": self.__stop_event}, kwargs=dict(stop_event=self.__stop_event),
) )
self.__invoker_thread.daemon = True # TODO: make async and do not use threads self.__invoker_thread.daemon = True # TODO: make async and do not use threads
self.__invoker_thread.start() self.__invoker_thread.start()

View File

@ -14,7 +14,7 @@ class MemoryInvocationQueue(InvocationQueueABC):
def __init__(self): def __init__(self):
self.__queue = Queue() self.__queue = Queue()
self.__cancellations = {} self.__cancellations = dict()
def get(self) -> InvocationQueueItem: def get(self) -> InvocationQueueItem:
item = self.__queue.get() item = self.__queue.get()

View File

@ -22,7 +22,6 @@ if TYPE_CHECKING:
from .item_storage.item_storage_base import ItemStorageABC from .item_storage.item_storage_base import ItemStorageABC
from .latents_storage.latents_storage_base import LatentsStorageBase from .latents_storage.latents_storage_base import LatentsStorageBase
from .model_manager.model_manager_base import ModelManagerServiceBase from .model_manager.model_manager_base import ModelManagerServiceBase
from .model_records import ModelRecordServiceBase
from .names.names_base import NameServiceBase from .names.names_base import NameServiceBase
from .session_processor.session_processor_base import SessionProcessorBase from .session_processor.session_processor_base import SessionProcessorBase
from .session_queue.session_queue_base import SessionQueueBase from .session_queue.session_queue_base import SessionQueueBase
@ -50,7 +49,6 @@ class InvocationServices:
latents: "LatentsStorageBase" latents: "LatentsStorageBase"
logger: "Logger" logger: "Logger"
model_manager: "ModelManagerServiceBase" model_manager: "ModelManagerServiceBase"
model_records: "ModelRecordServiceBase"
processor: "InvocationProcessorABC" processor: "InvocationProcessorABC"
performance_statistics: "InvocationStatsServiceBase" performance_statistics: "InvocationStatsServiceBase"
queue: "InvocationQueueABC" queue: "InvocationQueueABC"
@ -78,7 +76,6 @@ class InvocationServices:
latents: "LatentsStorageBase", latents: "LatentsStorageBase",
logger: "Logger", logger: "Logger",
model_manager: "ModelManagerServiceBase", model_manager: "ModelManagerServiceBase",
model_records: "ModelRecordServiceBase",
processor: "InvocationProcessorABC", processor: "InvocationProcessorABC",
performance_statistics: "InvocationStatsServiceBase", performance_statistics: "InvocationStatsServiceBase",
queue: "InvocationQueueABC", queue: "InvocationQueueABC",
@ -104,7 +101,6 @@ class InvocationServices:
self.latents = latents self.latents = latents
self.logger = logger self.logger = logger
self.model_manager = model_manager self.model_manager = model_manager
self.model_records = model_records
self.processor = processor self.processor = processor
self.performance_statistics = performance_statistics self.performance_statistics = performance_statistics
self.queue = queue self.queue = queue

View File

@ -122,7 +122,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
def log_stats(self): def log_stats(self):
completed = set() completed = set()
errored = set() errored = set()
for graph_id, _node_log in self._stats.items(): for graph_id, node_log in self._stats.items():
try: try:
current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id) current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
except Exception: except Exception:
@ -142,7 +142,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
cache_stats = self._cache_stats[graph_id] cache_stats = self._cache_stats[graph_id]
hwm = cache_stats.high_watermark / GIG hwm = cache_stats.high_watermark / GIG
tot = cache_stats.cache_size / GIG tot = cache_stats.cache_size / GIG
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG loaded = sum([v for v in cache_stats.loaded_model_sizes.values()]) / GIG
logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s") logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)") logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")
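The stats hunk sums the per-model cache sizes and converts bytes to gigabytes with a `GIG` constant before logging. A sketch of that arithmetic with made-up byte counts, assuming `GIG = 2**30`:

```python
GIG = 2**30  # assumed value of the constant used by the stats service

loaded_model_sizes = {"unet": 3_438_583_808, "vae": 334_641_164, "clip": 246_121_242}
high_watermark_bytes = 6_442_450_944

loaded = sum(loaded_model_sizes.values()) / GIG
hwm = high_watermark_bytes / GIG

print(f"loaded models: {loaded:4.2f}G, cache high watermark: {hwm:4.2f}G")
```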

View File

@ -15,8 +15,8 @@ class ItemStorageABC(ABC, Generic[T]):
_on_deleted_callbacks: list[Callable[[str], None]] _on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None: def __init__(self) -> None:
self._on_changed_callbacks = [] self._on_changed_callbacks = list()
self._on_deleted_callbacks = [] self._on_deleted_callbacks = list()
"""Base item storage class""" """Base item storage class"""

View File

@ -112,7 +112,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
) )
result = self._cursor.fetchall() result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result] items = list(map(lambda r: self._parse_item(r[0]), result))
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""") self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0] count = self._cursor.fetchone()[0]
@ -132,7 +132,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
) )
result = self._cursor.fetchall() result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result] items = list(map(lambda r: self._parse_item(r[0]), result))
self._cursor.execute( self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""", f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",

View File

@ -13,8 +13,8 @@ class LatentsStorageBase(ABC):
_on_deleted_callbacks: list[Callable[[str], None]] _on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None: def __init__(self) -> None:
self._on_changed_callbacks = [] self._on_changed_callbacks = list()
self._on_deleted_callbacks = [] self._on_deleted_callbacks = list()
@abstractmethod @abstractmethod
def get(self, name: str) -> torch.Tensor: def get(self, name: str) -> torch.Tensor:

View File

@ -19,7 +19,7 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20): def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20):
super().__init__() super().__init__()
self.__underlying_storage = underlying_storage self.__underlying_storage = underlying_storage
self.__cache = {} self.__cache = dict()
self.__cache_ids = Queue() self.__cache_ids = Queue()
self.__max_cache_size = max_cache_size self.__max_cache_size = max_cache_size

View File

@ -1 +0,0 @@
from .model_manager_default import ModelManagerService # noqa F401

View File

@ -1,8 +0,0 @@
"""Init file for model record services."""
from .model_records_base import ( # noqa F401
DuplicateModelException,
InvalidModelException,
ModelRecordServiceBase,
UnknownModelException,
)
from .model_records_sql import ModelRecordServiceSQL # noqa F401

View File

@ -1,169 +0,0 @@
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
"""
Abstract base class for storing and retrieving model configuration records.
"""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, Optional, Union
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
# should match the InvokeAI version when this is first released.
CONFIG_FILE_VERSION = "3.2.0"
class DuplicateModelException(Exception):
"""Raised on an attempt to add a model with the same key twice."""
class InvalidModelException(Exception):
"""Raised when an invalid model is detected."""
class UnknownModelException(Exception):
"""Raised on an attempt to fetch or delete a model with a nonexistent key."""
class ConfigFileVersionMismatchException(Exception):
"""Raised on an attempt to open a config with an incompatible version."""
class ModelRecordServiceBase(ABC):
"""Abstract base class for storage and retrieval of model configs."""
@property
@abstractmethod
def version(self) -> str:
"""Return the config file/database schema version."""
pass
@abstractmethod
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
"""
Add a model to the database.
:param key: Unique key for the model
:param config: Model configuration record, either a dict with the
required fields or a ModelConfigBase instance.
Can raise DuplicateModelException and InvalidModelConfigException exceptions.
"""
pass
@abstractmethod
def del_model(self, key: str) -> None:
"""
Delete a model.
:param key: Unique key for the model to be deleted
Can raise an UnknownModelException
"""
pass
@abstractmethod
def update_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
"""
Update the model, returning the updated version.
:param key: Unique key for the model to be updated
:param config: Model configuration record. Either a dict with the
required fields, or a ModelConfigBase instance.
"""
pass
@abstractmethod
def get_model(self, key: str) -> AnyModelConfig:
"""
Retrieve the configuration for the indicated model.
:param key: Key of model config to be fetched.
Exceptions: UnknownModelException
"""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""
Return True if a model with the indicated key exists in the database.
:param key: Unique key for the model to be deleted
"""
pass
@abstractmethod
def search_by_path(
self,
path: Union[str, Path],
) -> List[AnyModelConfig]:
"""Return the model(s) having the indicated path."""
pass
@abstractmethod
def search_by_hash(
self,
hash: str,
) -> List[AnyModelConfig]:
"""Return the model(s) having the indicated original hash."""
pass
@abstractmethod
def search_by_attr(
self,
model_name: Optional[str] = None,
base_model: Optional[BaseModelType] = None,
model_type: Optional[ModelType] = None,
) -> List[AnyModelConfig]:
"""
Return models matching name, base and/or type.
:param model_name: Filter by name of model (optional)
:param base_model: Filter by base model (optional)
:param model_type: Filter by type of model (optional)
If none of the optional filters are passed, will return all
models in the database.
"""
pass
def all_models(self) -> List[AnyModelConfig]:
"""Return all the model configs in the database."""
return self.search_by_attr()
def model_info_by_name(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> AnyModelConfig:
"""
Return information about a single model using its name, base type and model type.
If more than one model matches, raises a DuplicateModelException.
If no model matches, raises an UnknownModelException.
"""
model_configs = self.search_by_attr(model_name=model_name, base_model=base_model, model_type=model_type)
if len(model_configs) > 1:
raise DuplicateModelException(
f"More than one model matched the search criteria: base_model='{base_model}', model_type='{model_type}', model_name='{model_name}'."
)
if len(model_configs) == 0:
raise UnknownModelException(
f"No models matched the search criteria: base_model='{base_model}', model_type='{model_type}', model_name='{model_name}'."
)
return model_configs[0]
def rename_model(
self,
key: str,
new_name: str,
) -> AnyModelConfig:
"""
Rename the indicated model. Just a special case of update_model().
In some implementations, renaming the model may involve changing where
it is stored on the filesystem. So this is broken out.
:param key: Model key
:param new_name: New name for model
"""
config = self.get_model(key)
config.name = new_name
return self.update_model(key, config)
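The removed base class documents a lookup that raises on duplicates and on misses. A rough, self-contained stand-in for that behaviour, using plain dicts instead of the real config objects and locally defined exceptions:

```python
class DuplicateModelException(Exception):
    pass


class UnknownModelException(Exception):
    pass


def model_info_by_name(configs: list[dict], model_name: str) -> dict:
    matches = [c for c in configs if c["name"] == model_name]
    if len(matches) > 1:
        raise DuplicateModelException(f"More than one model named '{model_name}'")
    if not matches:
        raise UnknownModelException(f"No model named '{model_name}'")
    return matches[0]


configs = [{"name": "pokemon", "base": "sd-1"}, {"name": "anime", "base": "sd-2"}]
print(model_info_by_name(configs, "pokemon"))
```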

View File

@ -1,397 +0,0 @@
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
"""
SQL Implementation of the ModelRecordServiceBase API
Typical usage:
from invokeai.backend.model_manager import ModelConfigStoreSQL
store = ModelConfigStoreSQL(sqlite_db)
config = dict(
path='/tmp/pokemon.bin',
name='old name',
base_model='sd-1',
type='embedding',
format='embedding_file',
)
# adding - the key becomes the model's "key" field
store.add_model('key1', config)
# updating
config.name='new name'
store.update_model('key1', config)
# checking for existence
if store.exists('key1'):
print("yes")
# fetching config
new_config = store.get_model('key1')
print(new_config.name, new_config.base)
assert new_config.key == 'key1'
# deleting
store.del_model('key1')
# searching
configs = store.search_by_path(path='/tmp/pokemon.bin')
configs = store.search_by_hash('750a499f35e43b7e1b4d15c207aa2f01')
configs = store.search_by_attr(base_model='sd-2', model_type='main')
"""
import json
import sqlite3
from pathlib import Path
from typing import List, Optional, Union
from invokeai.backend.model_manager.config import (
AnyModelConfig,
BaseModelType,
ModelConfigBase,
ModelConfigFactory,
ModelType,
)
from ..shared.sqlite import SqliteDatabase
from .model_records_base import (
CONFIG_FILE_VERSION,
DuplicateModelException,
ModelRecordServiceBase,
UnknownModelException,
)
class ModelRecordServiceSQL(ModelRecordServiceBase):
"""Implementation of the ModelConfigStore ABC using a SQL database."""
_db: SqliteDatabase
_cursor: sqlite3.Cursor
def __init__(self, db: SqliteDatabase):
"""
Initialize a new object from a preexisting SqliteDatabase.
:param db: SqliteDatabase object wrapping the shared sqlite3 connection and threading lock
"""
super().__init__()
self._db = db
self._cursor = self._db.conn.cursor()
with self._db.lock:
# Enable foreign keys
self._db.conn.execute("PRAGMA foreign_keys = ON;")
self._create_tables()
self._db.conn.commit()
assert (
str(self.version) == CONFIG_FILE_VERSION
), f"Model config version {self.version} does not match expected version {CONFIG_FILE_VERSION}"
def _create_tables(self) -> None:
"""Create sqlite3 tables."""
# model_config table breaks out the fields that are common to all config objects
# and puts class-specific ones in a serialized json object
self._cursor.execute(
"""--sql
CREATE TABLE IF NOT EXISTS model_config (
id TEXT NOT NULL PRIMARY KEY,
-- The next 3 fields are enums in python, unrestricted string here
base TEXT NOT NULL,
type TEXT NOT NULL,
name TEXT NOT NULL,
path TEXT NOT NULL,
original_hash TEXT, -- could be null
-- Serialized JSON representation of the whole config object,
-- which will contain additional fields from subclasses
config TEXT NOT NULL,
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
-- Updated via trigger
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
-- unique constraint on combo of name, base and type
UNIQUE(name, base, type)
);
"""
)
# metadata table
self._cursor.execute(
"""--sql
CREATE TABLE IF NOT EXISTS model_manager_metadata (
metadata_key TEXT NOT NULL PRIMARY KEY,
metadata_value TEXT NOT NULL
);
"""
)
# Add trigger for `updated_at`.
self._cursor.execute(
"""--sql
CREATE TRIGGER IF NOT EXISTS model_config_updated_at
AFTER UPDATE
ON model_config FOR EACH ROW
BEGIN
UPDATE model_config SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
WHERE id = old.id;
END;
"""
)
# Add indexes for searchable fields
for stmt in [
"CREATE INDEX IF NOT EXISTS base_index ON model_config(base);",
"CREATE INDEX IF NOT EXISTS type_index ON model_config(type);",
"CREATE INDEX IF NOT EXISTS name_index ON model_config(name);",
"CREATE UNIQUE INDEX IF NOT EXISTS path_index ON model_config(path);",
]:
self._cursor.execute(stmt)
# Add our version to the metadata table
self._cursor.execute(
"""--sql
INSERT OR IGNORE into model_manager_metadata (
metadata_key,
metadata_value
)
VALUES (?,?);
""",
("version", CONFIG_FILE_VERSION),
)
def add_model(self, key: str, config: Union[dict, ModelConfigBase]) -> AnyModelConfig:
"""
Add a model to the database.
:param key: Unique key for the model
:param config: Model configuration record, either a dict with the
required fields or a ModelConfigBase instance.
Can raise DuplicateModelException and InvalidModelConfigException exceptions.
"""
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object.
json_serialized = record.model_dump_json() # and turn it into a json string.
with self._db.lock:
try:
self._cursor.execute(
"""--sql
INSERT INTO model_config (
id,
base,
type,
name,
path,
original_hash,
config
)
VALUES (?,?,?,?,?,?,?);
""",
(
key,
record.base,
record.type,
record.name,
record.path,
record.original_hash,
json_serialized,
),
)
self._db.conn.commit()
except sqlite3.IntegrityError as e:
self._db.conn.rollback()
if "UNIQUE constraint failed" in str(e):
if "model_config.path" in str(e):
msg = f"A model with path '{record.path}' is already installed"
elif "model_config.name" in str(e):
msg = f"A model with name='{record.name}', type='{record.type}', base='{record.base}' is already installed"
else:
msg = f"A model with key '{key}' is already installed"
raise DuplicateModelException(msg) from e
else:
raise e
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
return self.get_model(key)
@property
def version(self) -> str:
"""Return the version of the database schema."""
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT metadata_value FROM model_manager_metadata
WHERE metadata_key=?;
""",
("version",),
)
rows = self._cursor.fetchone()
if not rows:
raise KeyError("Models database does not have metadata key 'version'")
return rows[0]
def del_model(self, key: str) -> None:
"""
Delete a model.
:param key: Unique key for the model to be deleted
Can raise an UnknownModelException
"""
with self._db.lock:
try:
self._cursor.execute(
"""--sql
DELETE FROM model_config
WHERE id=?;
""",
(key,),
)
if self._cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
def update_model(self, key: str, config: ModelConfigBase) -> AnyModelConfig:
"""
Update the model, returning the updated version.
:param key: Unique key for the model to be updated
:param config: Model configuration record. Either a dict with the
required fields, or a ModelConfigBase instance.
"""
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object
json_serialized = record.model_dump_json() # and turn it into a json string.
with self._db.lock:
try:
self._cursor.execute(
"""--sql
UPDATE model_config
SET base=?,
type=?,
name=?,
path=?,
config=?
WHERE id=?;
""",
(record.base, record.type, record.name, record.path, json_serialized, key),
)
if self._cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
return self.get_model(key)
def get_model(self, key: str) -> AnyModelConfig:
"""
Retrieve the ModelConfigBase instance for the indicated model.
:param key: Key of model config to be fetched.
Exceptions: UnknownModelException
"""
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config FROM model_config
WHERE id=?;
""",
(key,),
)
rows = self._cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]))
return model
def exists(self, key: str) -> bool:
"""
Return True if a model with the indicated key exists in the database.
:param key: Unique key for the model to be deleted
"""
count = 0
with self._db.lock:
self._cursor.execute(
"""--sql
select count(*) FROM model_config
WHERE id=?;
""",
(key,),
)
count = self._cursor.fetchone()[0]
return count > 0
def search_by_attr(
self,
model_name: Optional[str] = None,
base_model: Optional[BaseModelType] = None,
model_type: Optional[ModelType] = None,
) -> List[AnyModelConfig]:
"""
Return models matching name, base and/or type.
:param model_name: Filter by name of model (optional)
:param base_model: Filter by base model (optional)
:param model_type: Filter by type of model (optional)
If none of the optional filters are passed, will return all
models in the database.
"""
results = []
where_clause = []
bindings = []
if model_name:
where_clause.append("name=?")
bindings.append(model_name)
if base_model:
where_clause.append("base=?")
bindings.append(base_model)
if model_type:
where_clause.append("type=?")
bindings.append(model_type)
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
with self._db.lock:
self._cursor.execute(
f"""--sql
select config FROM model_config
{where};
""",
tuple(bindings),
)
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
return results
def search_by_path(self, path: Union[str, Path]) -> List[ModelConfigBase]:
"""Return models with the indicated path."""
results = []
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config FROM model_config
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
return results
def search_by_hash(self, hash: str) -> List[ModelConfigBase]:
"""Return models with the indicated original_hash."""
results = []
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config FROM model_config
WHERE original_hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
return results

View File

@ -33,11 +33,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
self.__thread = Thread( self.__thread = Thread(
name="session_processor", name="session_processor",
target=self.__process, target=self.__process,
kwargs={ kwargs=dict(
"stop_event": self.__stop_event, stop_event=self.__stop_event, poll_now_event=self.__poll_now_event, resume_event=self.__resume_event
"poll_now_event": self.__poll_now_event, ),
"resume_event": self.__resume_event,
},
) )
self.__thread.start() self.__thread.start()

View File

@ -129,12 +129,12 @@ class Batch(BaseModel):
return v return v
model_config = ConfigDict( model_config = ConfigDict(
json_schema_extra={ json_schema_extra=dict(
"required": [ required=[
"graph", "graph",
"runs", "runs",
] ]
} )
) )
@ -191,8 +191,8 @@ class SessionQueueItemWithoutGraph(BaseModel):
return SessionQueueItemDTO(**queue_item_dict) return SessionQueueItemDTO(**queue_item_dict)
model_config = ConfigDict( model_config = ConfigDict(
json_schema_extra={ json_schema_extra=dict(
"required": [ required=[
"item_id", "item_id",
"status", "status",
"batch_id", "batch_id",
@ -203,7 +203,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
"created_at", "created_at",
"updated_at", "updated_at",
] ]
} )
) )
@ -222,8 +222,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
return SessionQueueItem(**queue_item_dict) return SessionQueueItem(**queue_item_dict)
model_config = ConfigDict( model_config = ConfigDict(
json_schema_extra={ json_schema_extra=dict(
"required": [ required=[
"item_id", "item_id",
"status", "status",
"batch_id", "batch_id",
@ -235,7 +235,7 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
"created_at", "created_at",
"updated_at", "updated_at",
] ]
} )
) )
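These hunks move the `json_schema_extra` payloads between `dict(...)` and literal syntax on pydantic v2 `ConfigDict`. A minimal sketch of the pattern with a placeholder model, not the real queue item:

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict


class QueueItemSketch(BaseModel):
    item_id: Optional[int] = None
    status: Optional[str] = None

    model_config = ConfigDict(
        json_schema_extra={
            "required": [
                "item_id",
                "status",
            ]
        }
    )


print(QueueItemSketch.model_json_schema()["required"])  # ['item_id', 'status']
```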
@ -355,7 +355,7 @@ def create_session_nfv_tuples(
for item in batch_datum.items for item in batch_datum.items
] ]
node_field_values_to_zip.append(node_field_values) node_field_values_to_zip.append(node_field_values)
data.append(list(zip(*node_field_values_to_zip, strict=True))) # type: ignore [arg-type] data.append(list(zip(*node_field_values_to_zip))) # type: ignore [arg-type]
# create generator to yield session,nfv tuples # create generator to yield session,nfv tuples
count = 0 count = 0
@ -383,7 +383,7 @@ def calc_session_count(batch: Batch) -> int:
for batch_datum in batch_datum_list: for batch_datum in batch_datum_list:
batch_data_items = range(len(batch_datum.items)) batch_data_items = range(len(batch_datum.items))
to_zip.append(batch_data_items) to_zip.append(batch_data_items)
data.append(list(zip(*to_zip, strict=True))) data.append(list(zip(*to_zip)))
data_product = list(product(*data)) data_product = list(product(*data))
return len(data_product) * batch.runs return len(data_product) * batch.runs
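The two hunks above toggle `strict=True` on the `zip` calls that pair up batch data columns. With `strict=True` (Python 3.10+) a length mismatch raises instead of silently truncating:

```python
seeds = [1, 2, 3]
prompts = ["a cat", "a dog", "a bird"]

print(list(zip(seeds, prompts, strict=True)))  # pairs up normally

try:
    list(zip(seeds, prompts[:2], strict=True))
except ValueError as err:
    # without strict=True the third seed would be dropped silently
    print("length mismatch:", err)
```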

View File

@ -78,7 +78,7 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li
"""Creates the default system graphs, or adds new versions if the old ones don't match""" """Creates the default system graphs, or adds new versions if the old ones don't match"""
# TODO: Uncomment this when we are ready to fix this up to prevent breaking changes # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
graphs: list[LibraryGraph] = [] graphs: list[LibraryGraph] = list()
text_to_image = graph_library.get(default_text_to_image_graph_id) text_to_image = graph_library.get(default_text_to_image_graph_id)

View File

@ -352,7 +352,7 @@ class Graph(BaseModel):
# Validate that all node ids are unique # Validate that all node ids are unique
node_ids = [n.id for n in self.nodes.values()] node_ids = [n.id for n in self.nodes.values()]
duplicate_node_ids = {node_id for node_id in node_ids if node_ids.count(node_id) >= 2} duplicate_node_ids = set([node_id for node_id in node_ids if node_ids.count(node_id) >= 2])
if duplicate_node_ids: if duplicate_node_ids:
raise DuplicateNodeIdError(f"Node ids must be unique, found duplicates {duplicate_node_ids}") raise DuplicateNodeIdError(f"Node ids must be unique, found duplicates {duplicate_node_ids}")
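The duplicate-id check above is written either as `set([...])` or as a set comprehension; the comprehension skips the intermediate list but is otherwise identical. A tiny sketch with made-up node ids:

```python
node_ids = ["noise", "denoise", "vae", "denoise"]

duplicate_node_ids = {n for n in node_ids if node_ids.count(n) >= 2}
print(duplicate_node_ids)  # {'denoise'}
```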
@ -616,7 +616,7 @@ class Graph(BaseModel):
self, node_path: str, prefix: Optional[str] = None self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]: ) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all input edges for a node along with the graph they are in and the graph's path""" """Gets all input edges for a node along with the graph they are in and the graph's path"""
edges = [] edges = list()
# Return any input edges that appear in this graph # Return any input edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path]) edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path])
@ -658,7 +658,7 @@ class Graph(BaseModel):
self, node_path: str, prefix: Optional[str] = None self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]: ) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all output edges for a node along with the graph they are in and the graph's path""" """Gets all output edges for a node along with the graph they are in and the graph's path"""
edges = [] edges = list()
# Return any input edges that appear in this graph # Return any input edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path]) edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path])
@ -680,8 +680,8 @@ class Graph(BaseModel):
new_input: Optional[EdgeConnection] = None, new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None,
) -> bool: ) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "collection")] inputs = list([e.source for e in self._get_input_edges(node_path, "collection")])
outputs = [e.destination for e in self._get_output_edges(node_path, "item")] outputs = list([e.destination for e in self._get_output_edges(node_path, "item")])
if new_input is not None: if new_input is not None:
inputs.append(new_input) inputs.append(new_input)
@ -694,7 +694,7 @@ class Graph(BaseModel):
# Get input and output fields (the fields linked to the iterator's input/output) # Get input and output fields (the fields linked to the iterator's input/output)
input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field) input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs] output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
# Input type must be a list # Input type must be a list
if get_origin(input_field) != list: if get_origin(input_field) != list:
@ -713,8 +713,8 @@ class Graph(BaseModel):
new_input: Optional[EdgeConnection] = None, new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None,
) -> bool: ) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "item")] inputs = list([e.source for e in self._get_input_edges(node_path, "item")])
outputs = [e.destination for e in self._get_output_edges(node_path, "collection")] outputs = list([e.destination for e in self._get_output_edges(node_path, "collection")])
if new_input is not None: if new_input is not None:
inputs.append(new_input) inputs.append(new_input)
@ -722,16 +722,18 @@ class Graph(BaseModel):
outputs.append(new_output) outputs.append(new_output)
# Get input and output fields (the fields linked to the iterator's input/output) # Get input and output fields (the fields linked to the iterator's input/output)
input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs] input_fields = list([get_output_field(self.get_node(e.node_id), e.field) for e in inputs])
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs] output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
# Validate that all inputs are derived from or match a single type # Validate that all inputs are derived from or match a single type
input_field_types = { input_field_types = set(
t [
for input_field in input_fields t
for t in ([input_field] if get_origin(input_field) is None else get_args(input_field)) for input_field in input_fields
if t != NoneType for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
} # Get unique types if t != NoneType
]
) # Get unique types
type_tree = nx.DiGraph() type_tree = nx.DiGraph()
type_tree.add_nodes_from(input_field_types) type_tree.add_nodes_from(input_field_types)
type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])]) type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
@ -759,15 +761,15 @@ class Graph(BaseModel):
"""Returns a NetworkX DiGraph representing the layout of this graph""" """Returns a NetworkX DiGraph representing the layout of this graph"""
# TODO: Cache this? # TODO: Cache this?
g = nx.DiGraph() g = nx.DiGraph()
g.add_nodes_from(list(self.nodes.keys())) g.add_nodes_from([n for n in self.nodes.keys()])
g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges}) g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
return g return g
def nx_graph_with_data(self) -> nx.DiGraph: def nx_graph_with_data(self) -> nx.DiGraph:
"""Returns a NetworkX DiGraph representing the data and layout of this graph""" """Returns a NetworkX DiGraph representing the data and layout of this graph"""
g = nx.DiGraph() g = nx.DiGraph()
g.add_nodes_from(list(self.nodes.items())) g.add_nodes_from([n for n in self.nodes.items()])
g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges}) g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
return g return g
def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph: def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph:
@ -789,7 +791,7 @@ class Graph(BaseModel):
# TODO: figure out if iteration nodes need to be expanded # TODO: figure out if iteration nodes need to be expanded
unique_edges = {(e.source.node_id, e.destination.node_id) for e in self.edges} unique_edges = set([(e.source.node_id, e.destination.node_id) for e in self.edges])
g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges]) g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges])
return g return g
@ -841,8 +843,8 @@ class GraphExecutionState(BaseModel):
return v return v
model_config = ConfigDict( model_config = ConfigDict(
json_schema_extra={ json_schema_extra=dict(
"required": [ required=[
"id", "id",
"graph", "graph",
"execution_graph", "execution_graph",
@ -853,7 +855,7 @@ class GraphExecutionState(BaseModel):
"prepared_source_mapping", "prepared_source_mapping",
"source_prepared_mapping", "source_prepared_mapping",
] ]
} )
) )
def next(self) -> Optional[BaseInvocation]: def next(self) -> Optional[BaseInvocation]:
@ -893,7 +895,7 @@ class GraphExecutionState(BaseModel):
source_node = self.prepared_source_mapping[node_id] source_node = self.prepared_source_mapping[node_id]
prepared_nodes = self.source_prepared_mapping[source_node] prepared_nodes = self.source_prepared_mapping[source_node]
if all(n in self.executed for n in prepared_nodes): if all([n in self.executed for n in prepared_nodes]):
self.executed.add(source_node) self.executed.add(source_node)
self.executed_history.append(source_node) self.executed_history.append(source_node)
@ -928,7 +930,7 @@ class GraphExecutionState(BaseModel):
input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field) input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field)
self_iteration_count = len(input_collection) self_iteration_count = len(input_collection)
new_nodes: list[str] = [] new_nodes: list[str] = list()
if self_iteration_count == 0: if self_iteration_count == 0:
# TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid. # TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid.
return new_nodes return new_nodes
@ -938,7 +940,7 @@ class GraphExecutionState(BaseModel):
# Create new edges for this iteration # Create new edges for this iteration
# For collect nodes, this may contain multiple inputs to the same field # For collect nodes, this may contain multiple inputs to the same field
new_edges: list[Edge] = [] new_edges: list[Edge] = list()
for edge in input_edges: for edge in input_edges:
for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id): for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id):
new_edge = Edge( new_edge = Edge(
@ -1032,7 +1034,7 @@ class GraphExecutionState(BaseModel):
# Create execution nodes # Create execution nodes
next_node = self.graph.get_node(next_node_id) next_node = self.graph.get_node(next_node_id)
new_node_ids = [] new_node_ids = list()
if isinstance(next_node, CollectInvocation): if isinstance(next_node, CollectInvocation):
# Collapse all iterator input mappings and create a single execution node for the collect invocation # Collapse all iterator input mappings and create a single execution node for the collect invocation
all_iteration_mappings = list( all_iteration_mappings = list(
@ -1053,10 +1055,7 @@ class GraphExecutionState(BaseModel):
# For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator # For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator
# TODO: Handle a node mapping to none # TODO: Handle a node mapping to none
eg = self.execution_graph.nx_graph_flat() eg = self.execution_graph.nx_graph_flat()
prepared_parent_mappings = [ prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations] # type: ignore
[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents]
for it in iterator_node_prepared_combinations
] # type: ignore
# Create execution node for each iteration # Create execution node for each iteration
for iteration_mappings in prepared_parent_mappings: for iteration_mappings in prepared_parent_mappings:
@ -1122,7 +1121,7 @@ class GraphExecutionState(BaseModel):
for edge in input_edges for edge in input_edges
if edge.destination.field == "item" if edge.destination.field == "item"
] ]
node.collection = output_collection setattr(node, "collection", output_collection)
else: else:
for edge in input_edges: for edge in input_edges:
output_value = getattr(self.results[edge.source.node_id], edge.source.field) output_value = getattr(self.results[edge.source.node_id], edge.source.field)
@ -1202,7 +1201,7 @@ class LibraryGraph(BaseModel):
@field_validator("exposed_inputs", "exposed_outputs") @field_validator("exposed_inputs", "exposed_outputs")
def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]): def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]):
if len(v) != len({i.alias for i in v}): if len(v) != len(set(i.alias for i in v)):
raise ValueError("Duplicate exposed alias") raise ValueError("Duplicate exposed alias")
return v return v

View File

@ -57,7 +57,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
INSERT INTO workflows(workflow) INSERT INTO workflows(workflow)
VALUES (?); VALUES (?);
""", """,
(workflow.model_dump_json(),), (workflow.json(),),
) )
self._conn.commit() self._conn.commit()
except Exception: except Exception:

View File

@ -1,5 +0,0 @@
"""
This module contains various classes, functions and models which are shared across the app, particularly by invocations.
Lifting these classes, functions and models into this shared module helps to reduce circular imports.
"""

View File

@ -1,66 +0,0 @@
class FieldDescriptions:
denoising_start = "When to start denoising, expressed as a percentage of total steps"
denoising_end = "When to stop denoising, expressed as a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."

View File

@ -1,16 +0,0 @@
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
class FreeUConfig(BaseModel):
"""
Configuration for the FreeU hyperparameters.
- https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu
- https://github.com/ChenyangSi/FreeU
"""
s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1)
s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2)
b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1)
b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2)
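A self-contained usage sketch of the FreeU hyperparameter model removed above, assuming pydantic v2 and the field bounds shown; plain strings stand in for `FieldDescriptions`:

```python
from pydantic import BaseModel, Field, ValidationError


class FreeUConfig(BaseModel):
    s1: float = Field(ge=-1, le=3, description="stage 1 skip-feature scaling")
    s2: float = Field(ge=-1, le=3, description="stage 2 skip-feature scaling")
    b1: float = Field(ge=-1, le=3, description="stage 1 backbone scaling")
    b2: float = Field(ge=-1, le=3, description="stage 2 backbone scaling")


print(FreeUConfig(s1=0.9, s2=0.2, b1=1.2, b2=1.4).model_dump())

try:
    FreeUConfig(s1=5.0, s2=0.2, b1=1.2, b2=1.4)  # s1 is outside [-1, 3]
except ValidationError as err:
    print("rejected:", err.errors()[0]["type"])  # less_than_equal
```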

View File

@ -59,7 +59,7 @@ def thin_one_time(x, kernels):
def lvmin_thin(x, prunings=True): def lvmin_thin(x, prunings=True):
y = x y = x
for _i in range(32): for i in range(32):
y, is_done = thin_one_time(y, lvmin_kernels) y, is_done = thin_one_time(y, lvmin_kernels)
if is_done: if is_done:
break break

View File

@ -21,11 +21,11 @@ def get_metadata_graph_from_raw_session(session_raw: str) -> Optional[dict]:
# sanity check make sure the graph is at least reasonably shaped # sanity check make sure the graph is at least reasonably shaped
if ( if (
not isinstance(graph, dict) type(graph) is not dict
or "nodes" not in graph or "nodes" not in graph
or not isinstance(graph["nodes"], dict) or type(graph["nodes"]) is not dict
or "edges" not in graph or "edges" not in graph
or not isinstance(graph["edges"], list) or type(graph["edges"]) is not list
): ):
# something has gone terribly awry, return an empty dict # something has gone terribly awry, return an empty dict
return None return None
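This hunk swaps `type(x) is dict` checks for `isinstance`, which also accepts subclasses. A toy version of the same shape check:

```python
from collections import OrderedDict

graph = {"nodes": OrderedDict(), "edges": []}

# isinstance accepts dict subclasses such as OrderedDict; `type(...) is dict` would not.
looks_like_graph = (
    isinstance(graph, dict)
    and "nodes" in graph
    and isinstance(graph["nodes"], dict)
    and "edges" in graph
    and isinstance(graph["edges"], list)
)
print(looks_like_graph)  # True
```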

View File

@ -88,7 +88,7 @@ class PromptFormatter:
t2i = self.t2i t2i = self.t2i
opt = self.opt opt = self.opt
switches = [] switches = list()
switches.append(f'"{opt.prompt}"') switches.append(f'"{opt.prompt}"')
switches.append(f"-s{opt.steps or t2i.steps}") switches.append(f"-s{opt.steps or t2i.steps}")
switches.append(f"-W{opt.width or t2i.width}") switches.append(f"-W{opt.width or t2i.width}")

View File

@ -88,7 +88,7 @@ class Txt2Mask(object):
provided image and returns a SegmentedGrayscale object in which the brighter provided image and returns a SegmentedGrayscale object in which the brighter
pixels indicate where the object is inferred to be. pixels indicate where the object is inferred to be.
""" """
if isinstance(image, str): if type(image) is str:
image = Image.open(image).convert("RGB") image = Image.open(image).convert("RGB")
image = ImageOps.exif_transpose(image) image = ImageOps.exif_transpose(image)

View File

@ -40,7 +40,7 @@ class InitImageResizer:
(rw, rh) = (int(scale * im.width), int(scale * im.height)) (rw, rh) = (int(scale * im.width), int(scale * im.height))
# round everything to multiples of 64 # round everything to multiples of 64
width, height, rw, rh = (x - x % 64 for x in (width, height, rw, rh)) width, height, rw, rh = map(lambda x: x - x % 64, (width, height, rw, rh))
# no resize necessary, but return a copy # no resize necessary, but return a copy
if im.width == width and im.height == height: if im.width == width and im.height == height:
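The resizer hunk rounds every dimension down to a multiple of 64, either via `map`/`lambda` or a generator expression. The arithmetic itself, with made-up dimensions:

```python
width, height = 517, 389

width64, height64 = (x - x % 64 for x in (width, height))
print(width64, height64)  # 512 384
```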

View File

@@ -32,7 +32,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
 from omegaconf import OmegaConf
-from pydantic import ValidationError
+from pydantic.error_wrappers import ValidationError
 from tqdm import tqdm
 from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
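The hunk simply swaps the internal `pydantic.error_wrappers` path for the public top-level import; `ValidationError` is exported from the package root in both Pydantic 1 and 2, so the root import is the forward-compatible spelling. A small sketch of catching it (the ModelDest model is a made-up stand-in, not an InvokeAI class):

```python
from pydantic import BaseModel, ValidationError


class ModelDest(BaseModel):
    """Hypothetical stand-in for a download-entry record."""

    url: str
    dest: str


try:
    ModelDest(url="https://example.com/model.pth")  # "dest" is missing
except ValidationError as err:
    print(f"{len(err.errors())} validation error(s)")  # 1 validation error(s)
```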
@@ -197,7 +197,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
 def download_conversion_models():
     target_dir = config.models_path / "core/convert"
-    kwargs = {}  # for future use
+    kwargs = dict()  # for future use

     try:
         logger.info("Downloading core tokenizers and text encoders")
@@ -252,26 +252,26 @@ def download_conversion_models():
 def download_realesrgan():
     logger.info("Installing ESRGAN Upscaling models...")
     URLs = [
-        {
-            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
-            "dest": "core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
-            "description": "RealESRGAN_x4plus.pth",
-        },
-        {
-            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
-            "dest": "core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
-            "description": "RealESRGAN_x4plus_anime_6B.pth",
-        },
-        {
-            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
-            "dest": "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
-            "description": "ESRGAN_SRx4_DF2KOST_official.pth",
-        },
-        {
-            "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
-            "dest": "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
-            "description": "RealESRGAN_x2plus.pth",
-        },
+        dict(
+            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+            dest="core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
+            description="RealESRGAN_x4plus.pth",
+        ),
+        dict(
+            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+            dest="core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
+            description="RealESRGAN_x4plus_anime_6B.pth",
+        ),
+        dict(
+            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
+            dest="core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
+            description="ESRGAN_SRx4_DF2KOST_official.pth",
+        ),
+        dict(
+            url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+            dest="core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
+            description="RealESRGAN_x2plus.pth",
+        ),
     ]
     for model in URLs:
         download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])
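The body of `download_with_progress_bar` is not part of this hunk; as context, a minimal sketch of what such a helper typically looks like, assuming `requests` and `tqdm` are available. The actual InvokeAI implementation may differ; only the signature shown in the hunk header above is taken from the diff.

```python
from pathlib import Path

import requests
from tqdm import tqdm


def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the model") -> None:
    """Stream `model_url` to `model_dest`, showing a byte-level progress bar."""
    dest = Path(model_dest)
    dest.parent.mkdir(parents=True, exist_ok=True)
    with requests.get(model_url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        total = int(resp.headers.get("content-length", 0))
        with open(dest, "wb") as f, tqdm(total=total, unit="B", unit_scale=True, desc=label) as bar:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                f.write(chunk)
                bar.update(len(chunk))
```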
@@ -680,7 +680,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
         if program_opts.default_only
         else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
         if program_opts.yes_to_all
-        else [],
+        else list(),
     )

View File

@@ -38,7 +38,6 @@ SAMPLER_CHOICES = [
     "k_heun",
     "k_lms",
     "plms",
-    "lcm",
 ]

 PRECISION_CHOICES = [
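The only difference here is the `"lcm"` scheduler entry, present on the left-hand side only. A small sketch of how a CLI might validate a user-supplied sampler against such a list; the argparse wiring and the abbreviated lists below are illustrative, not InvokeAI's actual CLI.

```python
import argparse

# Abbreviated stand-ins for the full choice lists shown in the diff.
SAMPLER_CHOICES = ["k_heun", "k_lms", "plms", "lcm"]
PRECISION_CHOICES = ["auto", "float16", "float32"]

parser = argparse.ArgumentParser()
parser.add_argument("--sampler", choices=SAMPLER_CHOICES, default="k_lms")
parser.add_argument("--precision", choices=PRECISION_CHOICES, default="auto")

args = parser.parse_args(["--sampler", "lcm"])
print(args.sampler, args.precision)  # lcm auto
```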

Some files were not shown because too many files have changed in this diff.