diff --git a/.github/workflows/style-checks.yml b/.github/workflows/style-checks.yml
index 96e1df406b..08ff8ba402 100644
--- a/.github/workflows/style-checks.yml
+++ b/.github/workflows/style-checks.yml
@@ -1,6 +1,4 @@
name: style checks
-# just formatting and flake8 for now
-# TODO: add isort later
on:
pull_request:
@@ -20,8 +18,8 @@ jobs:
- name: Install dependencies with pip
run: |
- pip install black flake8 Flake8-pyproject
+ pip install black flake8 Flake8-pyproject isort
- # - run: isort --check-only .
+ - run: isort --check-only .
- run: black --check .
- run: flake8
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9ef8b79e77..6cff07a959 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,3 +15,10 @@ repos:
language: system
entry: flake8
types: [python]
+
+ - id: isort
+ name: isort
+ stages: [commit]
+ language: system
+ entry: isort
+ types: [python]
\ No newline at end of file
diff --git a/README.md b/README.md
index c40b9ef418..15c5747ea8 100644
--- a/README.md
+++ b/README.md
@@ -46,13 +46,13 @@ the foundation for multiple commercial products.
Install](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)] [Discord Server] [Documentation and
- Tutorials] [Code and
- Downloads] [Bug Reports]
+ Tutorials]
+ [Bug Reports]
[Discussion,
- Ideas & Q&A]
+ Ideas & Q&A]
+ [Contributing]
@@ -368,9 +368,9 @@ InvokeAI offers a locally hosted Web Server & React Frontend, with an industry l
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
-### *Node Architecture & Editor (Beta)*
+### *Workflows & Nodes*
-Invoke AI's backend is built on a graph-based execution architecture. This allows for customizable generation pipelines to be developed by professional users looking to create specific workflows to support their production use-cases, and will be extended in the future with additional capabilities.
+InvokeAI offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
### *Board & Gallery Management*
@@ -383,8 +383,9 @@ Invoke AI provides an organized gallery system for easily storing, accessing, an
- *Upscaling Tools*
- *Embedding Manager & Support*
- *Model Manager & Support*
+- *Workflow creation & management*
- *Node-Based Architecture*
-- *Node-Based Plug-&-Play UI (Beta)*
+
### Latest Changes
@@ -395,20 +396,18 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
### Troubleshooting
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
-problems and other issues.
+problems and other issues. For more help, please join our [Discord][discord link].
## Contributing
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.
-To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
-
-If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
+Get started with contributing by reading our [Contribution documentation](https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/), joining the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) or the GitHub discussion board.
If you are unfamiliar with how
-to contribute to GitHub projects, here is a
-[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
+to contribute to GitHub projects, we have a new contributor checklist you can follow to get started contributing:
+[New Contributor Checklist](https://invoke-ai.github.io/InvokeAI/contributing/contribution_guides/newContributorChecklist/).
We hope you enjoy using our software as much as we enjoy creating it,
and we hope that some of those of you who are reading this will elect
@@ -424,7 +423,7 @@ their time, hard work and effort.
### Support
-For support, please use this repository's GitHub Issues tracking service, or join the Discord.
+For support, please use this repository's GitHub Issues tracking service, or join the [Discord][discord link].
Original portions of the software are Copyright (c) 2023 by respective contributors.
diff --git a/docs/assets/nodes/groupsallscale.png b/docs/assets/nodes/groupsallscale.png
index aa67db9a3e..5a12fe9e13 100644
Binary files a/docs/assets/nodes/groupsallscale.png and b/docs/assets/nodes/groupsallscale.png differ
diff --git a/docs/assets/nodes/groupsconditioning.png b/docs/assets/nodes/groupsconditioning.png
index bb0e210f42..cf38a00290 100644
Binary files a/docs/assets/nodes/groupsconditioning.png and b/docs/assets/nodes/groupsconditioning.png differ
diff --git a/docs/assets/nodes/groupscontrol.png b/docs/assets/nodes/groupscontrol.png
index ad696c3087..f2e3d43e7d 100644
Binary files a/docs/assets/nodes/groupscontrol.png and b/docs/assets/nodes/groupscontrol.png differ
diff --git a/docs/assets/nodes/groupsimgvae.png b/docs/assets/nodes/groupsimgvae.png
index c60bf40d67..ae6967cab1 100644
Binary files a/docs/assets/nodes/groupsimgvae.png and b/docs/assets/nodes/groupsimgvae.png differ
diff --git a/docs/assets/nodes/groupsiterate.png b/docs/assets/nodes/groupsiterate.png
index 9c1cd15bc2..82ec8bf020 100644
Binary files a/docs/assets/nodes/groupsiterate.png and b/docs/assets/nodes/groupsiterate.png differ
diff --git a/docs/assets/nodes/groupslora.png b/docs/assets/nodes/groupslora.png
index befcee6490..736aff5914 100644
Binary files a/docs/assets/nodes/groupslora.png and b/docs/assets/nodes/groupslora.png differ
diff --git a/docs/assets/nodes/groupsmultigenseeding.png b/docs/assets/nodes/groupsmultigenseeding.png
index a644146c86..b54ec2afb7 100644
Binary files a/docs/assets/nodes/groupsmultigenseeding.png and b/docs/assets/nodes/groupsmultigenseeding.png differ
diff --git a/docs/assets/nodes/groupsnoise.png b/docs/assets/nodes/groupsnoise.png
index 2df51d434e..c825e314ae 100644
Binary files a/docs/assets/nodes/groupsnoise.png and b/docs/assets/nodes/groupsnoise.png differ
diff --git a/docs/assets/nodes/groupsrandseed.png b/docs/assets/nodes/groupsrandseed.png
index 06430cdee4..9b8bcfdb17 100644
Binary files a/docs/assets/nodes/groupsrandseed.png and b/docs/assets/nodes/groupsrandseed.png differ
diff --git a/docs/assets/nodes/linearview.png b/docs/assets/nodes/linearview.png
new file mode 100644
index 0000000000..0dc28534e3
Binary files /dev/null and b/docs/assets/nodes/linearview.png differ
diff --git a/docs/assets/prompt_syntax/sdxl-prompt-concatenated.png b/docs/assets/prompt_syntax/sdxl-prompt-concatenated.png
new file mode 100644
index 0000000000..8d5336da3d
Binary files /dev/null and b/docs/assets/prompt_syntax/sdxl-prompt-concatenated.png differ
diff --git a/docs/assets/prompt_syntax/sdxl-prompt.png b/docs/assets/prompt_syntax/sdxl-prompt.png
new file mode 100644
index 0000000000..b85464c5ad
Binary files /dev/null and b/docs/assets/prompt_syntax/sdxl-prompt.png differ
diff --git a/docs/contributing/CONTRIBUTING.md b/docs/contributing/CONTRIBUTING.md
index 29fea1cf59..ccaf4f2561 100644
--- a/docs/contributing/CONTRIBUTING.md
+++ b/docs/contributing/CONTRIBUTING.md
@@ -1,39 +1,41 @@
-# How to Contribute
+# Contributing
-## Welcome to Invoke AI
Invoke AI originated as a project built by the community, and that vision carries forward today as we aim to build the best pro-grade tools available. We work together to incorporate the latest in AI/ML research, making these tools available in over 20 languages to artists and creatives around the world as part of our fully permissive OSS project designed for individual users to self-host and use.
-## Contributing to Invoke AI
+# Methods of Contributing to Invoke AI
Anyone who wishes to contribute to InvokeAI, whether features, bug fixes, code cleanup, testing, code reviews, documentation or translation is very much encouraged to do so.
-To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
+## Development
+If you’d like to help with development, please see our [development guide](contribution_guides/development.md).
-### Areas of contribution:
+**New Contributors:** If you’re unfamiliar with contributing to open source projects, take a look at our [new contributor guide](contribution_guides/newContributorChecklist.md).
-#### Development
-If you’d like to help with development, please see our [development guide](contribution_guides/development.md). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.
+## Nodes
+If you’d like to add a Node, please see our [nodes contribution guide](../nodes/contributingNodes.md).
-#### Nodes
-If you’d like to help with development, please see our [nodes contribution guide](/nodes/contributingNodes). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.
+## Support and Triaging
+Helping support other users in [Discord](https://discord.gg/ZmtBAhwWhy) and on GitHub is a valuable form of contribution that we greatly appreciate.
-#### Documentation
+We receive many issues and requests for help from users. We're limited in bandwidth relative to our user base, so providing answers to questions or helping identify causes of issues is very helpful. By doing this, you enable us to spend time on the highest priority work.
+
+## Documentation
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documentation.md).
-#### Translation
+## Translation
If you'd like to help with translation, please see our [translation guide](contribution_guides/translation.md).
-#### Tutorials
+## Tutorials
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
We hope you enjoy using our software as much as we enjoy creating it, and we hope that some of those of you who are reading this will elect to become part of our contributor community.
-### Contributors
+# Contributors
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
-### Code of Conduct
+# Code of Conduct
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
@@ -47,8 +49,7 @@ By making a contribution to this project, you certify that:
This disclaimer is not a license and does not grant any rights or permissions. You must obtain necessary permissions and licenses, including from third parties, before contributing to this project.
This disclaimer is provided "as is" without warranty of any kind, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, or non-infringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the contribution or the use or other dealings in the contribution.
-
-### Support
+# Support
For support, please use this repository's [GitHub Issues](https://github.com/invoke-ai/InvokeAI/issues), or join the [Discord](https://discord.gg/ZmtBAhwWhy).
diff --git a/docs/contributing/contribution_guides/development_guides/contributingToFrontend.md b/docs/contributing/contribution_guides/contributingToFrontend.md
similarity index 100%
rename from docs/contributing/contribution_guides/development_guides/contributingToFrontend.md
rename to docs/contributing/contribution_guides/contributingToFrontend.md
diff --git a/docs/contributing/contribution_guides/development.md b/docs/contributing/contribution_guides/development.md
index a867c7fa1f..fcfb82c811 100644
--- a/docs/contributing/contribution_guides/development.md
+++ b/docs/contributing/contribution_guides/development.md
@@ -4,14 +4,21 @@
If you are looking to help to with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
-For more information, please review our area specific documentation:
+
+## **Get Started**
+
+To get started, take a look at our [new contributors checklist](newContributorChecklist.md)
+
+Once you're setup, for more information, you can review the documentation specific to your area of interest:
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
* #### [Frontend Documentation](development_guides/contributingToFrontend.md)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
-If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md) or [translation](translation.md).
+
+
+If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md), [translation](translation.md) or helping support other users and triage issues as they're reported in GitHub.
There are two paths to making a development contribution:
@@ -23,60 +30,10 @@ There are two paths to making a development contribution:
## Best Practices:
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
-* Comments! Commenting your code helps reviwers easily understand your contribution
+* Comments! Commenting your code helps reviewers easily understand your contribution
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
* Make all communications public. This ensure knowledge is shared with the whole community
-## **How do I make a contribution?**
-
-Never made an open source contribution before? Wondering how contributions work in our project? Here's a quick rundown!
-
-Before starting these steps, ensure you have your local environment [configured for development](../LOCAL_DEVELOPMENT.md).
-
-1. Find a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) that you are interested in addressing or a feature that you would like to add. Then, reach out to our team in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord to ensure you are setup for success.
-2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
-3. Clone the repository to your local machine using:
-
-```bash
-git clone https://github.com/your-GitHub-username/InvokeAI.git
-```
-
-If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is a easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.
-
-4. Create a new branch for your fix using:
-
-```bash
-git checkout -b branch-name-here
-```
-
-5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
-6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
-
-```bash
-git add insert-paths-of-changed-files-here
-```
-
-7. Store the contents of the index with a descriptive message.
-
-```bash
-git commit -m "Insert a short message of the changes made here"
-```
-
-8. Push the changes to the remote repository using
-
-```markdown
-git push origin branch-name-here
-```
-
-9. Submit a pull request to the **main** branch of the InvokeAI repository.
-10. Title the pull request with a short description of the changes made and the issue or bug number associated with your change. For example, you can title an issue like so "Added more log outputting to resolve #1234".
-11. In the description of the pull request, explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. It's OK if your pull request is not perfect (no pull request is), the reviewer will be able to help you fix any problems and improve it!
-12. Wait for the pull request to be reviewed by other collaborators.
-13. Make changes to the pull request if the reviewer(s) recommend them.
-14. Celebrate your success after your pull request is merged!
-
-If you’d like to learn more about contributing to Open Source projects, here is a [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
-
## **Where can I go for help?**
If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.
@@ -85,6 +42,7 @@ For frontend related work, **@pyschedelicious** is the best person to reach out
For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@pyschedelicious**.
+
## **What does the Code of Conduct mean for me?**
Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code.
diff --git a/docs/contributing/contribution_guides/newContributorChecklist.md b/docs/contributing/contribution_guides/newContributorChecklist.md
new file mode 100644
index 0000000000..90725f99ab
--- /dev/null
+++ b/docs/contributing/contribution_guides/newContributorChecklist.md
@@ -0,0 +1,68 @@
+# New Contributor Guide
+
+If you're a new contributor to InvokeAI or Open Source Projects, this is the guide for you.
+
+## New Contributor Checklist
+- [x] Set up your local development environment & fork of InvokeAI by following [the steps outlined here](../../installation/020_INSTALL_MANUAL.md#developer-install)
+- [x] Set up your local tooling with [this guide](InvokeAI/contributing/LOCAL_DEVELOPMENT/#developing-invokeai-in-vscode). Feel free to skip this step if you already have tooling you're comfortable with.
+- [x] Familiarize yourself with [Git](https://www.atlassian.com/git) & our project structure by reading through the [development documentation](development.md)
+- [x] Join the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord
+- [x] Choose an issue to work on! This can be achieved by asking in the #dev-chat channel, tackling a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) or finding an item on the [roadmap](https://github.com/orgs/invoke-ai/projects/7). If nothing in any of those places catches your eye, feel free to work on something of interest to you!
+- [x] Make your first Pull Request with the guide below
+- [x] Happy development! Don't be afraid to ask for help - we're happy to help you contribute!
+
+
+## How do I make a contribution?
+
+Never made an open source contribution before? Wondering how contributions work in our project? Here's a quick rundown!
+
+Before starting these steps, ensure you have your local environment [configured for development](../LOCAL_DEVELOPMENT.md).
+
+1. Find a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) that you are interested in addressing or a feature that you would like to add. Then, reach out to our team in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord to ensure you are setup for success.
+2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
+3. Clone the repository to your local machine using:
+```bash
+git clone https://github.com/your-GitHub-username/InvokeAI.git
+```
+If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.
+4. Create a new branch for your fix using:
+```bash
+git checkout -b branch-name-here
+```
+5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
+6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
+```bash
+git add -A
+```
+7. Store the contents of the index with a descriptive message.
+```bash
+git commit -m "Insert a short message of the changes made here"
+```
+8. Push the changes to the remote repository using
+```bash
+git push origin branch-name-here
+```
+9. Submit a pull request to the **main** branch of the InvokeAI repository. If you're not sure how to, [follow this guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)
+10. Title the pull request with a short description of the changes made and the issue or bug number associated with your change. For example, you can title an issue like so "Added more log outputting to resolve #1234".
+11. In the description of the pull request, explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. It's OK if your pull request is not perfect (no pull request is), the reviewer will be able to help you fix any problems and improve it!
+12. Wait for the pull request to be reviewed by other collaborators.
+13. Make changes to the pull request if the reviewer(s) recommend them.
+14. Celebrate your success after your pull request is merged!
+
+If you’d like to learn more about contributing to Open Source projects, here is a [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
+
+
+## Best Practices:
+* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
+* Comments! Commenting your code helps reviewers easily understand your contribution
+* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
+* Make all communications public. This ensures knowledge is shared with the whole community
+
+
+## **Where can I go for help?**
+
+If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.
+
+For frontend related work, **@pyschedelicious** is the best person to reach out to.
+
+For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@pyschedelicious**.
diff --git a/docs/features/CONCEPTS.md b/docs/features/CONCEPTS.md
index 63f52d8a20..df9ee5bd26 100644
--- a/docs/features/CONCEPTS.md
+++ b/docs/features/CONCEPTS.md
@@ -21,8 +21,8 @@ TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TRAINING.md) produces `.pt`.
-The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
-amassed a large ligrary of >800 community-contributed TI files covering a
+[Hugging Face](https://huggingface.co/sd-concepts-library) has
+amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. You can also install your own or others' TI files
by placing them in the designated directory for the compatible model type
diff --git a/docs/features/CONTROLNET.md b/docs/features/CONTROLNET.md
index faa2d7cdf3..42ed43146e 100644
--- a/docs/features/CONTROLNET.md
+++ b/docs/features/CONTROLNET.md
@@ -104,7 +104,7 @@ The OpenPose control model allows for the identification of the general pose of
The MediaPipe Face identification processor is able to clearly identify facial features in order to capture vivid expressions of human faces.
-**Tile (experimental)**:
+**Tile**:
The Tile model fills out details in the image to match the image, rather than the prompt. The Tile Model is a versatile tool that offers a range of functionalities. Its primary capabilities can be boiled down to two main behaviors:
@@ -117,8 +117,6 @@ The Tile Model can be a powerful tool in your arsenal for enhancing image qualit
With Pix2Pix, you can input an image into the controlnet, and then "instruct" the model to change it using your prompt. For example, you can say "Make it winter" to add more wintry elements to a scene.
-**Inpaint**: Coming Soon - Currently this model is available but not functional on the Canvas. An upcoming release will provide additional capabilities for using this model when inpainting.
-
Each of these models can be adjusted and combined with other ControlNet models to achieve different results, giving you even more control over your image generation process.
diff --git a/docs/features/MODEL_MERGING.md b/docs/features/MODEL_MERGING.md
index 36e15ad0e4..6adf4db16a 100644
--- a/docs/features/MODEL_MERGING.md
+++ b/docs/features/MODEL_MERGING.md
@@ -2,17 +2,50 @@
title: Model Merging
---
-# :material-image-off: Model Merging
-
-## How to Merge Models
-
-As of version 2.3, InvokeAI comes with a script that allows you to
-merge two or three diffusers-type models into a new merged model. The
+InvokeAI provides the ability to merge two or three diffusers-type models into a new merged model. The
resulting model will combine characteristics of the original, and can
be used to teach an old model new tricks.
+## How to Merge Models
+
+Model Merging can be done by navigating to the Model Manager and clicking the "Merge Models" tab. From there, you can select the models and settings you want to use to merge the models.
+
+## Settings
+
+* Model Selection: there are three multiple choice fields that
+ display all the diffusers-style models that InvokeAI knows about.
+ If you do not see the model you are looking for, then it is probably
+ a legacy checkpoint model and needs to be converted using the
+ `invoke` command-line client and its `!optimize` command. You
+ must select at least two models to merge. The third can be left at
+ "None" if you desire.
+
+* Alpha: This is the ratio to use when combining models. It ranges
+ from 0 to 1. The higher the value, the more weight is given to the
+ 2d and (optionally) 3d models. So if you have two models named "A"
+ and "B", an alpha value of 0.25 will give you a merged model that is
+ 25% A and 75% B.
+
+* Interpolation Method: This is the method used to combine
+ weights. The options are "weighted_sum" (the default), "sigmoid",
+ "inv_sigmoid" and "add_difference". Each produces slightly different
+ results. When three models are in use, only "add_difference" is
+ available.
+
+* Save Location: The location you want the merged model to be saved in. Default is in the InvokeAI root folder
+
+* Name for merged model: This is the name for the new model. Please
+ use InvokeAI conventions - only alphanumeric letters and the
+ characters ".+-".
+
+* Ignore Mismatches / Force: Not all models are compatible with each other. The merge
+ script will check for compatibility and refuse to merge ones that
+ are incompatible. Set this checkbox to try merging anyway.
+
+
+
You may run the merge script by starting the invoke launcher
-(`invoke.sh` or `invoke.bat`) and choosing the option for _merge
+(`invoke.sh` or `invoke.bat`) and choosing the option (4) for _merge
models_. This will launch a text-based interactive user interface that
prompts you to select the models to merge, how to merge them, and the
merged model name.
@@ -40,34 +73,4 @@ this to get back.
If the merge runs successfully, it will create a new diffusers model
under the selected name and register it with InvokeAI.
-## The Settings
-
-* Model Selection -- there are three multiple choice fields that
- display all the diffusers-style models that InvokeAI knows about.
- If you do not see the model you are looking for, then it is probably
- a legacy checkpoint model and needs to be converted using the
- `invoke` command-line client and its `!optimize` command. You
- must select at least two models to merge. The third can be left at
- "None" if you desire.
-
-* Alpha -- This is the ratio to use when combining models. It ranges
- from 0 to 1. The higher the value, the more weight is given to the
- 2d and (optionally) 3d models. So if you have two models named "A"
- and "B", an alpha value of 0.25 will give you a merged model that is
- 25% A and 75% B.
-
-* Interpolation Method -- This is the method used to combine
- weights. The options are "weighted_sum" (the default), "sigmoid",
- "inv_sigmoid" and "add_difference". Each produces slightly different
- results. When three models are in use, only "add_difference" is
- available. (TODO: cite a reference that describes what these
- interpolation methods actually do and how to decide among them).
-
-* Force -- Not all models are compatible with each other. The merge
- script will check for compatibility and refuse to merge ones that
- are incompatible. Set this checkbox to try merging anyway.
-
-* Name for merged model - This is the name for the new model. Please
- use InvokeAI conventions - only alphanumeric letters and the
- characters ".+-".
diff --git a/docs/features/PROMPTS.md b/docs/features/PROMPTS.md
index 66af903072..be11e4cce6 100644
--- a/docs/features/PROMPTS.md
+++ b/docs/features/PROMPTS.md
@@ -142,7 +142,7 @@ Prompt2prompt `.swap()` is not compatible with xformers, which will be temporari
The `prompt2prompt` code is based off
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).
-### Escaping parentheses () and speech marks ""
+### Escaping parentheses and speech marks
If the model you are using has parentheses () or speech marks "" as part of its
syntax, you will need to "escape" these using a backslash, so that`(my_keyword)`
@@ -246,7 +246,7 @@ To create a Dynamic Prompt, follow these steps:
Within the braces, separate each option using a vertical bar |.
If you want to include multiple options from a single group, prefix with the desired number and $$.
-For instance: A {house|apartment|lodge|cottage} in {summer|winter|autumn|spring} designed in {2$$style1|style2|style3}.
+For instance: A {house|apartment|lodge|cottage} in {summer|winter|autumn|spring} designed in {style1|style2|style3}.
### How Dynamic Prompts Work
Once a Dynamic Prompt is configured, the system generates an array of combinations using the options provided. Each group of options in curly braces is treated independently, with the system selecting one option from each group. For a prefixed set (e.g., 2$$), the system will select two distinct options.
@@ -273,3 +273,36 @@ Below are some useful strategies for creating Dynamic Prompts:
Experiment with different quantities for the prefix. For example, 3$$ will select three distinct options.
Be aware of coherence in your prompts. Although the system can generate all possible combinations, not all may semantically make sense. Therefore, carefully choose the options for each group.
Always review and fine-tune the generated prompts as needed. While Dynamic Prompts can help you generate a multitude of combinations, the final polishing and refining remain in your hands.
+
+
+## SDXL Prompting
+
+Prompting with SDXL is slightly different than prompting with SD1.5 or SD2.1 models - SDXL expects a prompt _and_ a style.
+
+
+### Prompting
+
+
+In the prompt box, enter a positive or negative prompt as you normally would.
+
+For the style box you can enter a style that you want the image to be generated in. You can use styles from this example list, or any other style you wish: anime, photographic, digital art, comic book, fantasy art, analog film, neon punk, isometric, low poly, origami, line art, cinematic, 3d model, pixel art, etc.
+
+
+### Concatenated Prompts
+
+
+InvokeAI also has the option to concatenate the prompt and style inputs, by pressing the "link" button in the Positive Prompt box.
+
+This concatenates the prompt & style inputs, and passes the joined prompt and style to the SDXL model.
+![SDXL concatenated prompt boxes in InvokeAI](../assets/prompt_syntax/sdxl-prompt-concatenated.png)
+
+
+
+
+
+
+
diff --git a/docs/features/TRAINING.md b/docs/features/TRAINING.md
index 41197a334f..7be9aff0f2 100644
--- a/docs/features/TRAINING.md
+++ b/docs/features/TRAINING.md
@@ -43,27 +43,22 @@ into the directory
InvokeAI 2.3 and higher comes with a text console-based training front
end. From within the `invoke.sh`/`invoke.bat` Invoke launcher script,
-start the front end by selecting choice (3):
+start the training tool by selecting choice (3):
```sh
-Do you want to generate images using the
-1: Browser-based UI
-2: Command-line interface
-3: Run textual inversion training
-4: Merge models (diffusers type only)
-5: Download and install models
-6: Change InvokeAI startup options
-7: Re-run the configure script to fix a broken install
-8: Open the developer console
-9: Update InvokeAI
-10: Command-line help
-Q: Quit
-
-Please enter 1-10, Q: [1]
+1 "Generate images with a browser-based interface"
+2 "Explore InvokeAI nodes using a command-line interface"
+3 "Textual inversion training"
+4 "Merge models (diffusers type only)"
+5 "Download and install models"
+6 "Change InvokeAI startup options"
+7 "Re-run the configure script to fix a broken install or to complete a major upgrade"
+8 "Open the developer console"
+9 "Update InvokeAI"
```
-From the command line, with the InvokeAI virtual environment active,
-you can launch the front end with the command `invokeai-ti --gui`.
+Alternatively, you can select option (8) to open the developer console. From the command line, with the InvokeAI virtual environment active,
+you can launch the front end with the command `invokeai-ti --gui`.
This will launch a text-based front end that will look like this:
diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md
index fd0fbe35ed..865253bb14 100644
--- a/docs/installation/020_INSTALL_MANUAL.md
+++ b/docs/installation/020_INSTALL_MANUAL.md
@@ -287,7 +287,7 @@ manager, please follow these steps:
Leave off the `--gui` option to run the script using command-line arguments. Pass the `--help` argument
to get usage instructions.
-### Developer Install
+## Developer Install
If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
@@ -296,13 +296,14 @@ code for InvokeAI. For this to work, you will need to install the
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)
+1. Create a fork of the InvokeAI repository through the GitHub UI or [this link](https://github.com/invoke-ai/InvokeAI/fork)
1. From the command line, run this command:
```bash
- git clone https://github.com/invoke-ai/InvokeAI.git
+ git clone https://github.com/<your-github-username>/InvokeAI.git
```
This will create a directory named `InvokeAI` and populate it with the
- full source code from the InvokeAI repository.
+ full source code from your fork of the InvokeAI repository.
2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
diff --git a/docs/installation/INSTALLATION.md b/docs/installation/INSTALLATION.md
index ec5e2492b6..8ac2c56c48 100644
--- a/docs/installation/INSTALLATION.md
+++ b/docs/installation/INSTALLATION.md
@@ -17,14 +17,32 @@ This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
-### [Installation Getting Started Guide](installation)
-#### **[Automated Installer](010_INSTALL_AUTOMATED.md)**
+
+## **[Automated Installer](010_INSTALL_AUTOMATED.md)**
✅ This is the recommended installation method for first-time users.
-#### [Manual Installation](020_INSTALL_MANUAL.md)
-This method is recommended for experienced users and developers
-#### [Docker Installation](040_INSTALL_DOCKER.md)
-This method is recommended for those familiar with running Docker containers
-### Other Installation Guides
+
+ This is a script that will install all of InvokeAI's essential
+ third party libraries and InvokeAI itself. It includes access to a
+ "developer console" which will help us debug problems with you and
+ give you access to experimental features.
+
+## **[Manual Installation](020_INSTALL_MANUAL.md)**
+This method is recommended for experienced users and developers.
+
+ In this method you will manually run the commands needed to install
+ InvokeAI and its dependencies. We offer two recipes: one suited to
+ those who prefer the `conda` tool, and one suited to those who prefer
+ `pip` and Python virtual environments. In our hands the pip install
+ is faster and more reliable, but your mileage may vary.
+ Note that the conda installation method is currently deprecated and
+ will not be supported at some point in the future.
+
+## **[Docker Installation](040_INSTALL_DOCKER.md)**
+This method is recommended for those familiar with running Docker containers.
+
+We offer a method for creating Docker containers containing InvokeAI and its dependencies. This method is recommended for individuals who have experience with Docker containers and understand the pluses and minuses of a container-based install.
+
+## Other Installation Guides
- [PyPatchMatch](060_INSTALL_PATCHMATCH.md)
- [XFormers](070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](030_INSTALL_CUDA_AND_ROCM.md)
@@ -63,43 +81,3 @@ images in full-precision mode:
- GTX 1650 series cards
- GTX 1660 series cards
-## Installation options
-
-1. [Automated Installer](010_INSTALL_AUTOMATED.md)
-
- This is a script that will install all of InvokeAI's essential
- third party libraries and InvokeAI itself. It includes access to a
- "developer console" which will help us debug problems with you and
- give you to access experimental features.
-
-
- ✅ This is the recommended option for first time users.
-
-2. [Manual Installation](020_INSTALL_MANUAL.md)
-
- In this method you will manually run the commands needed to install
- InvokeAI and its dependencies. We offer two recipes: one suited to
- those who prefer the `conda` tool, and one suited to those who prefer
- `pip` and Python virtual environments. In our hands the pip install
- is faster and more reliable, but your mileage may vary.
- Note that the conda installation method is currently deprecated and
- will not be supported at some point in the future.
-
- This method is recommended for users who have previously used `conda`
- or `pip` in the past, developers, and anyone who wishes to remain on
- the cutting edge of future InvokeAI development and is willing to put
- up with occasional glitches and breakage.
-
-3. [Docker Installation](040_INSTALL_DOCKER.md)
-
- We also offer a method for creating Docker containers containing
- InvokeAI and its dependencies. This method is recommended for
- individuals with experience with Docker containers and understand
- the pluses and minuses of a container-based install.
-
-## Quick Guides
-
-* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
-* [Installing XFormers](./070_INSTALL_XFORMERS.md)
-* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
-* [Installing New Models](./050_INSTALLING_MODELS.md)
diff --git a/docs/nodes/NODES.md b/docs/nodes/NODES.md
index 9455e6bdcf..1abd0b1ac4 100644
--- a/docs/nodes/NODES.md
+++ b/docs/nodes/NODES.md
@@ -1,13 +1,32 @@
-# Using the Node Editor
+# Using the Workflow Editor
-The nodes editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. Nodes take in inputs on the left side of the node, and return an output on the right side of the node. A node graph is composed of multiple nodes that are connected together to create a workflow. Nodes' inputs and outputs are connected by dragging connectors from node to node. Inputs and outputs are color coded for ease of use.
+The workflow editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. Nodes take in inputs on the left side of the node, and return an output on the right side of the node. A node graph is composed of multiple nodes that are connected together to create a workflow. Nodes' inputs and outputs are connected by dragging connectors from node to node. Inputs and outputs are color coded for ease of use.
-To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onward like a power bar passes electricity. Not all outputs are compatible with all inputs, however - Each node has different constraints on how it is expecting to input/output information. In general, node outputs are colour-coded to match compatible inputs of other nodes.
+If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](../help/diffusion.md) Understanding how diffusion works will enable you to more easily use the Workflow Editor and build workflows to suit your needs.
+
+## UI Features
+
+### Linear View
+The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.
+
+To add an input to the Linear UI, right click on the input and select "Add to Linear View".
+
+The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enabling others to use them, regardless of complexity.
+
+![linearview](../assets/nodes/linearview.png)
+
+### Renaming Fields and Nodes
+Any node or input field can be renamed in the workflow editor. If the input field you have renamed has been added to the Linear View, the changed name will be reflected in the Linear View and the node.
+
+### Managing Nodes
+
+* Ctrl+C to copy a node
+* Ctrl+V to paste a node
+* Backspace/Delete to delete a node
+* Shift+Click to drag and select multiple nodes
-If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](../help/diffusion.md) Understanding how diffusion works will enable you to more easily use the Nodes Editor and build workflows to suit your needs.
-
-## Important Concepts
+## Important Concepts
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
@@ -37,7 +56,7 @@ It is common to want to use both the same seed (for continuity) and random seeds
### ControlNet
-The ControlNet node outputs a Control, which can be provided as input to non-image *ToLatents nodes. Depending on the type of ControlNet desired, ControlNet nodes usually require an image processor node, such as a Canny Processor or Depth Processor, which prepares an input image for use with ControlNet.
+The ControlNet node outputs a Control, which can be provided as input to a Denoise Latents node. Depending on the type of ControlNet desired, ControlNet nodes usually require an image processor node, such as a Canny Processor or Depth Processor, which prepares an input image for use with ControlNet.
![groupscontrol](../assets/nodes/groupscontrol.png)
@@ -59,10 +78,9 @@ Iteration is a common concept in any processing, and means to repeat a process w
![groupsiterate](../assets/nodes/groupsiterate.png)
-### Multiple Image Generation + Random Seeds
+### Batch / Multiple Image Generation + Random Seeds
-Multiple image generation in the node editor is done using the RandomRange node. In this case, the 'Size' field represents the number of images to generate. As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection.
-
-To control seeds across generations takes some care. The first row in the screenshot will generate multiple images with different seeds, but using the same RandomRange parameters across invocations will result in the same group of random seeds being used across the images, producing repeatable results. In the second row, adding the RandomInt node as input to RandomRange's 'Seed' edge point will ensure that seeds are varied across all images across invocations, producing varied results.
+Batch or multiple image generation in the workflow editor is done using the RandomRange node. In this case, the 'Size' field represents the number of images to generate, meaning this example will generate 4 images. As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection. This noise can then be fed to the Denoise Latents node for it to iterate through the denoising process with the different seeds provided.
![groupsmultigenseeding](../assets/nodes/groupsmultigenseeding.png)
+
diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md
index c7456fcd4b..c48c971098 100644
--- a/docs/nodes/communityNodes.md
+++ b/docs/nodes/communityNodes.md
@@ -4,9 +4,9 @@ These are nodes that have been developed by the community, for the community. If
If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md).
-To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations` folder in your Invoke AI install location. Along with the node, an example node graph should be provided to help you get started with the node.
+To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations` folder in your Invoke AI install location. If you used the automated installation, this can be found inside the `.venv` folder. Along with the node, an example node graph should be provided to help you get started with the node.
-To use a community node graph, download the the `.json` node graph file and load it into Invoke AI via the **Load Nodes** button on the Node Editor.
+To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
## Community Nodes
diff --git a/docs/nodes/contributingNodes.md b/docs/nodes/contributingNodes.md
index a34d429cd8..c58a56e4e6 100644
--- a/docs/nodes/contributingNodes.md
+++ b/docs/nodes/contributingNodes.md
@@ -4,10 +4,10 @@ To learn about the specifics of creating a new node, please visit our [Node crea
Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:
-- Make sure the node is contained in a new Python (.py) file
-- Submit a pull request with a link to your node in GitHub against the `nodes` branch to add the node to the [Community Nodes](Community Nodes) list
- - Make sure you are following the template below and have provided all relevant details about the node and what it does.
-- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you might be asked for permission to include it in the core project.
+- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the node's usage & examples to help others more easily use your node.
+- Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list
+ - Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node.
+- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project.
### Community Node Template
diff --git a/docs/nodes/defaultNodes.md b/docs/nodes/defaultNodes.md
index 8bc304216d..b022eeed14 100644
--- a/docs/nodes/defaultNodes.md
+++ b/docs/nodes/defaultNodes.md
@@ -22,6 +22,7 @@ The table below contains a list of the default nodes shipped with InvokeAI and t
|Divide Integers | Divides two numbers|
|Dynamic Prompt | Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator|
|Upscale (RealESRGAN) | Upscales an image using RealESRGAN.|
+|Float Math | Perform basic math operations on two floats|
|Float Primitive Collection | A collection of float primitive values|
|Float Primitive | A float primitive value|
|Float Range | Creates a range|
@@ -29,6 +30,7 @@ The table below contains a list of the default nodes shipped with InvokeAI and t
|Blur Image | Blurs an image|
|Extract Image Channel | Gets a channel from an image.|
|Image Primitive Collection | A collection of image primitive values|
+|Integer Math | Perform basic math operations on two integers|
|Convert Image Mode | Converts an image to a different mode.|
|Crop Image | Crops an image to a specified box. The box can be outside of the image.|
|Image Hue Adjustment | Adjusts the Hue of an image.|
@@ -42,6 +44,8 @@ The table below contains a list of the default nodes shipped with InvokeAI and t
|Paste Image | Pastes an image into another image.|
|ImageProcessor | Base class for invocations that preprocess images for ControlNet|
|Resize Image | Resizes an image to specific dimensions|
+|Round Float | Rounds a float to a specified number of decimal places|
+|Float to Integer | Converts a float to an integer. Optionally rounds to an even multiple of a input number.|
|Scale Image | Scales an image by a factor|
|Image to Latents | Encodes an image into latents.|
|Add Invisible Watermark | Add an invisible watermark to an image|
diff --git a/docs/nodes/exampleWorkflows.md b/docs/nodes/exampleWorkflows.md
index 5f8dabb67c..6b5b0912dc 100644
--- a/docs/nodes/exampleWorkflows.md
+++ b/docs/nodes/exampleWorkflows.md
@@ -1,15 +1,13 @@
# Example Workflows
-TODO: Will update once uploading workflows is available.
+We've curated some example workflows for you to get started with Workflows in InvokeAI
-## Text2Image
+To use them, right click on your desired workflow, press "Download Linked File". You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
-## Image2Image
+If you're interested in finding more workflows, checkout the [#share-your-workflows](https://discord.com/channels/1020123559063990373/1130291608097661000) channel in the InvokeAI Discord.
-## ControlNet
+* [SD1.5 / SD2 Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Text_to_Image.json)
+* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
+* [SDXL (with Refiner) Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
+* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json)
-## Upscaling
-
-## Inpainting / Outpainting
-
-## LoRAs
diff --git a/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json b/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json
new file mode 100644
index 0000000000..c963ace025
--- /dev/null
+++ b/docs/workflows/ESRGAN_img2img_upscale w_Canny_ControlNet.json
@@ -0,0 +1,1010 @@
+{
+ "name": "ESRGAN img2img upscale w_ Lineart ControlNet",
+ "author": "InvokeAI",
+ "description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
+ "version": "1.0.1",
+ "contact": "invoke@invoke.ai",
+ "tags": "tiled, upscale controlnet, default",
+ "notes": "",
+ "exposedFields": [
+ {
+ "nodeId": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "fieldName": "model"
+ },
+ {
+ "nodeId": "c394834e-cab7-4c0c-919e-2e35eba7f34e",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "465c7e6e-278f-49b0-87ab-642e88cd076f",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4",
+ "fieldName": "image"
+ }
+ ],
+ "meta": {
+ "version": "1.0.0"
+ },
+ "nodes": [
+ {
+ "id": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "type": "invocation",
+ "data": {
+ "id": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "type": "main_model_loader",
+ "inputs": {
+ "model": {
+ "id": "4fcc98ee-1c70-4ad3-aaee-df72e0d4ecb9",
+ "name": "model",
+ "type": "MainModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "stable-diffusion-v1-5",
+ "base_model": "sd-1",
+ "model_type": "main"
+ }
+ }
+ },
+ "outputs": {
+ "unet": {
+ "id": "3ecbc0ff-a7b0-43de-a81f-039210cbda50",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "output"
+ },
+ "clip": {
+ "id": "4e55bd72-5409-4fba-9929-4177e4ae9c34",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "vae": {
+ "id": "0095495c-4424-451f-a8f5-26dc840a3c56",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 202,
+ "position": {
+ "x": 175,
+ "y": 300
+ }
+ },
+ {
+ "id": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "type": "invocation",
+ "data": {
+ "id": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "type": "noise",
+ "inputs": {
+ "seed": {
+ "id": "a64e30f7-f9f4-4d67-a5aa-af5924205f92",
+ "name": "seed",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "width": {
+ "id": "054eaa35-63af-41c9-b13f-e9c2aaeeca43",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 512
+ },
+ "height": {
+ "id": "6e9a642c-fe1d-477f-ae66-1706471f7d73",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 512
+ },
+ "use_cpu": {
+ "id": "caa3c8ba-aed5-44d8-88d9-4f48a75d59a4",
+ "name": "use_cpu",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ }
+ },
+ "outputs": {
+ "noise": {
+ "id": "133dbb18-f862-430b-b9a0-613aa8e61c7d",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "2e31961a-af0c-497b-9ae2-429cb6c2f5a1",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "eca33bb8-37bb-4bfd-b7c7-3dba01207374",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1400,
+ "y": 650
+ }
+ },
+ {
+ "id": "c2172a8b-1b5f-4330-acbe-dd2565c3b988",
+ "type": "invocation",
+ "data": {
+ "id": "c2172a8b-1b5f-4330-acbe-dd2565c3b988",
+ "type": "l2i",
+ "inputs": {
+ "tiled": {
+ "id": "406bccc1-d757-4578-b46e-be6141c03385",
+ "name": "tiled",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "fp32": {
+ "id": "960ec115-547c-45c8-af2a-569214d9409c",
+ "name": "fp32",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "latents": {
+ "id": "c8658cc5-3762-499d-9cad-eceb8f9dde4e",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "vae": {
+ "id": "c35cf05c-0985-4bc4-9b05-0c89799bb888",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "25afb2bc-c964-4cd3-8332-4d0e9ea65d3a",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "aff7065e-8ce0-44aa-bb6e-d16925279fdd",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "6f5e8706-7a9b-4455-beb8-3d2e2ceabbc2",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": true,
+ "isIntermediate": false
+ },
+ "width": 320,
+ "height": 266,
+ "position": {
+ "x": 2225,
+ "y": 450
+ }
+ },
+ {
+ "id": "c394834e-cab7-4c0c-919e-2e35eba7f34e",
+ "type": "invocation",
+ "data": {
+ "id": "c394834e-cab7-4c0c-919e-2e35eba7f34e",
+ "type": "compel",
+ "inputs": {
+ "prompt": {
+ "id": "fef594dd-07d3-47e6-97d0-1803b55a0f26",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Prompt",
+ "value": "tiger"
+ },
+ "clip": {
+ "id": "e5639045-aa13-48c2-a172-869774aecab6",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "3249093e-0bc4-42a7-8a9b-2172fb89e915",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 235,
+ "position": {
+ "x": 975,
+ "y": -25
+ }
+ },
+ {
+ "id": "465c7e6e-278f-49b0-87ab-642e88cd076f",
+ "type": "invocation",
+ "data": {
+ "id": "465c7e6e-278f-49b0-87ab-642e88cd076f",
+ "type": "compel",
+ "inputs": {
+ "prompt": {
+ "id": "9a3ac8ea-7655-4806-ab8c-b7a18a253181",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Prompt",
+ "value": ""
+ },
+ "clip": {
+ "id": "1d348def-bb7d-4bab-b983-9f55c933ea67",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "f92febc1-67c4-45d4-b2e4-9ba470e4ccef",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 235,
+ "position": {
+ "x": 975,
+ "y": 250
+ }
+ },
+ {
+ "id": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4",
+ "type": "invocation",
+ "data": {
+ "id": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4",
+ "type": "image",
+ "inputs": {
+ "image": {
+ "id": "0b2f59b4-9994-4c99-9309-5434c746abb9",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "9a41092e-50ec-4530-95b2-33d9207a8f50",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "6462e3b2-6450-45fd-9fee-0fbe25537ed0",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "94c0d477-8753-4976-ba91-b9eb7ad71285",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 225,
+ "position": {
+ "x": 50,
+ "y": 750
+ }
+ },
+ {
+ "id": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "type": "invocation",
+ "data": {
+ "id": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "type": "i2l",
+ "inputs": {
+ "image": {
+ "id": "b69df743-8045-4ffe-bb14-71b7f9c17c5f",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "vae": {
+ "id": "a98a3497-34c5-46f7-9eaf-c24eab5d481a",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "tiled": {
+ "id": "706dc2c8-1820-42a7-98e1-bcd631737e7b",
+ "name": "tiled",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "fp32": {
+ "id": "5fc3bc04-c66e-46db-be2a-470c9d64b0d8",
+ "name": "fp32",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "50b89da5-e7ed-45cd-b74e-83c07e510ccd",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "7be69dad-837f-4c98-8ae2-c7aacaa44b52",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "66e3c7ec-3848-4afb-84bb-ff3a09e47089",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 299,
+ "position": {
+ "x": 975,
+ "y": 525
+ }
+ },
+ {
+ "id": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "type": "invocation",
+ "data": {
+ "id": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "type": "denoise_latents",
+ "inputs": {
+ "noise": {
+ "id": "6b894db2-ee45-45b6-b531-573311ddea73",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "steps": {
+ "id": "7e7693fe-6c0c-464a-8535-2ed517766c19",
+ "name": "steps",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 64
+ },
+ "cfg_scale": {
+ "id": "890ba738-690c-44a3-97cd-c589257b531a",
+ "name": "cfg_scale",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 7.5
+ },
+ "denoising_start": {
+ "id": "0e157ca8-5c06-499a-be6d-283fb834df32",
+ "name": "denoising_start",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0.7
+ },
+ "denoising_end": {
+ "id": "915bc497-cb58-40a5-8089-2ece7213be21",
+ "name": "denoising_end",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "scheduler": {
+ "id": "694471cd-8708-411b-9d88-2ae725254ff0",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler_a"
+ },
+ "control": {
+ "id": "b14cff5e-30ea-4d61-b27c-8247063699ad",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "latents": {
+ "id": "0528dbfd-d661-4c73-b493-722b27e37201",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "denoise_mask": {
+ "id": "ea7d6070-4f00-4b77-b49e-ffe1ca0e3fea",
+ "name": "denoise_mask",
+ "type": "DenoiseMaskField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "positive_conditioning": {
+ "id": "56a3fa76-9eb4-4680-8a4d-169696034525",
+ "name": "positive_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "negative_conditioning": {
+ "id": "0eed85bc-d5bd-4de4-8155-0c08f1ac5e32",
+ "name": "negative_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "unet": {
+ "id": "87ab9e45-a92f-4d47-a9f6-a65fe23373de",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "d12e0bd6-7d35-4d12-84c0-540c26ba01c8",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "201ca444-2d10-4bab-b6be-e1b5be97e1b0",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "74ce84a5-b68c-4503-8a7d-bc017024678a",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 558,
+ "position": {
+ "x": 1800,
+ "y": 350
+ }
+ },
+ {
+ "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e",
+ "type": "invocation",
+ "data": {
+ "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e",
+ "type": "rand_int",
+ "inputs": {
+ "low": {
+ "id": "10eff2d0-929f-45ca-a1ba-68c3e742db71",
+ "name": "low",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "high": {
+ "id": "b6e39169-e6ee-496a-8046-5444497036c2",
+ "name": "high",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 2147483647
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "cc477a7a-36bc-458f-b7ac-6717bac6f12b",
+ "name": "value",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 975,
+ "y": 850
+ }
+ },
+ {
+ "id": "08c41d50-fb10-43a9-a58b-fc15ee678a83",
+ "type": "invocation",
+ "data": {
+ "id": "08c41d50-fb10-43a9-a58b-fc15ee678a83",
+ "type": "esrgan",
+ "inputs": {
+ "image": {
+ "id": "01b92996-26aa-412b-9eba-d13cb7b370a8",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "model_name": {
+ "id": "3fc7bf6a-e181-4236-b746-48b011351af1",
+ "name": "model_name",
+ "type": "enum",
+ "fieldKind": "input",
+ "label": "",
+ "value": "RealESRGAN_x2plus.pth"
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "6feb0664-c61d-4fcd-8226-ed81591dcb0c",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "61650a80-7d2f-4509-8600-574c5cc6e569",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "fcb00841-f068-475e-ac90-0874313fd7fa",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 282,
+ "position": {
+ "x": 475,
+ "y": 725
+ }
+ },
+ {
+ "id": "30598d37-cf80-4e12-896a-7683cf727e77",
+ "type": "invocation",
+ "data": {
+ "id": "30598d37-cf80-4e12-896a-7683cf727e77",
+ "type": "controlnet",
+ "inputs": {
+ "image": {
+ "id": "d44259eb-bc93-4d4b-9665-a7895e5a77ab",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "control_model": {
+ "id": "2021065f-d5c9-47ad-bfea-03eea03a19ce",
+ "name": "control_model",
+ "type": "ControlNetModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "sd-controlnet-canny",
+ "base_model": "sd-1"
+ }
+ },
+ "control_weight": {
+ "id": "f856e29f-303f-4507-8c39-71ccc636f67c",
+ "name": "control_weight",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "begin_step_percent": {
+ "id": "f9187fa7-1510-439d-9c9b-e5fa990639b0",
+ "name": "begin_step_percent",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "end_step_percent": {
+ "id": "3f7c15c3-2b80-49a8-8eec-57d277c37364",
+ "name": "end_step_percent",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "control_mode": {
+ "id": "f34cb991-ecb5-423c-865f-9890b2fa9d23",
+ "name": "control_mode",
+ "type": "enum",
+ "fieldKind": "input",
+ "label": "",
+ "value": "balanced"
+ },
+ "resize_mode": {
+ "id": "552142f6-f6a6-4291-803c-68caefec7c6d",
+ "name": "resize_mode",
+ "type": "enum",
+ "fieldKind": "input",
+ "label": "",
+ "value": "just_resize"
+ }
+ },
+ "outputs": {
+ "control": {
+ "id": "6160265b-4a8d-4fc5-8e1f-d793a353d2db",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 480,
+ "position": {
+ "x": 1375,
+ "y": 875
+ }
+ },
+ {
+ "id": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9",
+ "type": "invocation",
+ "data": {
+ "id": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9",
+ "type": "vae_loader",
+ "inputs": {
+ "vae_model": {
+ "id": "b68038ae-b8cb-4e29-9581-da50d55af462",
+ "name": "vae_model",
+ "type": "VaeModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "sd-vae-ft-mse",
+ "base_model": "sd-1"
+ }
+ }
+ },
+ "outputs": {
+ "vae": {
+ "id": "82f4ba9d-5e3a-4b48-b7a4-37de956663d7",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 138,
+ "position": {
+ "x": 175,
+ "y": 525
+ }
+ },
+ {
+ "id": "b10d39ab-7bc9-48bc-b883-2fd50920876d",
+ "type": "invocation",
+ "data": {
+ "id": "b10d39ab-7bc9-48bc-b883-2fd50920876d",
+ "type": "canny_image_processor",
+ "inputs": {
+ "image": {
+ "id": "dee9cdf2-9b3f-4d20-8433-ef7d6f6526bd",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "low_threshold": {
+ "id": "4eaefd37-9fbe-4b73-8a17-c60d4e1d7e39",
+ "name": "low_threshold",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 100
+ },
+ "high_threshold": {
+ "id": "c394720b-546b-464e-8f53-d20bfda4ee04",
+ "name": "high_threshold",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 200
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "80fcdf76-dfc3-41a1-99ea-5ab2aa5fde07",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "3fa5e5a9-ff60-44d2-8d91-635d0c798f15",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "01a10c42-d485-4436-af8b-03d71c59bc8c",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 339,
+ "position": {
+ "x": 925,
+ "y": 925
+ }
+ }
+ ],
+ "edges": [
+ {
+ "source": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e",
+ "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "id": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e-167ffc36-4cb7-425c-ae55-e88f80a1d6fe-collapsed",
+ "type": "collapsed"
+ },
+ {
+ "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "sourceHandle": "clip",
+ "target": "c394834e-cab7-4c0c-919e-2e35eba7f34e",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7clip-c394834e-cab7-4c0c-919e-2e35eba7f34eclip",
+ "type": "default"
+ },
+ {
+ "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "sourceHandle": "clip",
+ "target": "465c7e6e-278f-49b0-87ab-642e88cd076f",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7clip-465c7e6e-278f-49b0-87ab-642e88cd076fclip",
+ "type": "default"
+ },
+ {
+ "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "sourceHandle": "vae",
+ "target": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "targetHandle": "vae",
+ "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7vae-a7f1336d-516d-4735-826f-3c633dfaa5e8vae",
+ "type": "default"
+ },
+ {
+ "source": "aba70c57-2495-4ec1-8d23-02b1d11445c7",
+ "sourceHandle": "unet",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "unet",
+ "id": "reactflow__edge-aba70c57-2495-4ec1-8d23-02b1d11445c7unet-61613ab2-784d-4a5c-8576-18fd5da065efunet",
+ "type": "default"
+ },
+ {
+ "source": "465c7e6e-278f-49b0-87ab-642e88cd076f",
+ "sourceHandle": "conditioning",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "negative_conditioning",
+ "id": "reactflow__edge-465c7e6e-278f-49b0-87ab-642e88cd076fconditioning-61613ab2-784d-4a5c-8576-18fd5da065efnegative_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "c394834e-cab7-4c0c-919e-2e35eba7f34e",
+ "sourceHandle": "conditioning",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "positive_conditioning",
+ "id": "reactflow__edge-c394834e-cab7-4c0c-919e-2e35eba7f34econditioning-61613ab2-784d-4a5c-8576-18fd5da065efpositive_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "sourceHandle": "noise",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "noise",
+ "id": "reactflow__edge-167ffc36-4cb7-425c-ae55-e88f80a1d6fenoise-61613ab2-784d-4a5c-8576-18fd5da065efnoise",
+ "type": "default"
+ },
+ {
+ "source": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "sourceHandle": "latents",
+ "target": "c2172a8b-1b5f-4330-acbe-dd2565c3b988",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-61613ab2-784d-4a5c-8576-18fd5da065eflatents-c2172a8b-1b5f-4330-acbe-dd2565c3b988latents",
+ "type": "default"
+ },
+ {
+ "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "sourceHandle": "latents",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8latents-61613ab2-784d-4a5c-8576-18fd5da065eflatents",
+ "type": "default"
+ },
+ {
+ "source": "39abdb3e-f8d1-4b0a-bab1-547d9e25b78e",
+ "sourceHandle": "value",
+ "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "targetHandle": "seed",
+ "id": "reactflow__edge-39abdb3e-f8d1-4b0a-bab1-547d9e25b78evalue-167ffc36-4cb7-425c-ae55-e88f80a1d6feseed",
+ "type": "default"
+ },
+ {
+ "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "sourceHandle": "width",
+ "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "targetHandle": "width",
+ "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8width-167ffc36-4cb7-425c-ae55-e88f80a1d6fewidth",
+ "type": "default"
+ },
+ {
+ "source": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "sourceHandle": "height",
+ "target": "167ffc36-4cb7-425c-ae55-e88f80a1d6fe",
+ "targetHandle": "height",
+ "id": "reactflow__edge-a7f1336d-516d-4735-826f-3c633dfaa5e8height-167ffc36-4cb7-425c-ae55-e88f80a1d6feheight",
+ "type": "default"
+ },
+ {
+ "source": "4e2833b2-5d35-45ec-ae65-89ea1846a3f4",
+ "sourceHandle": "image",
+ "target": "08c41d50-fb10-43a9-a58b-fc15ee678a83",
+ "targetHandle": "image",
+ "id": "reactflow__edge-4e2833b2-5d35-45ec-ae65-89ea1846a3f4image-08c41d50-fb10-43a9-a58b-fc15ee678a83image",
+ "type": "default"
+ },
+ {
+ "source": "08c41d50-fb10-43a9-a58b-fc15ee678a83",
+ "sourceHandle": "image",
+ "target": "a7f1336d-516d-4735-826f-3c633dfaa5e8",
+ "targetHandle": "image",
+ "id": "reactflow__edge-08c41d50-fb10-43a9-a58b-fc15ee678a83image-a7f1336d-516d-4735-826f-3c633dfaa5e8image",
+ "type": "default"
+ },
+ {
+ "source": "30598d37-cf80-4e12-896a-7683cf727e77",
+ "sourceHandle": "control",
+ "target": "61613ab2-784d-4a5c-8576-18fd5da065ef",
+ "targetHandle": "control",
+ "id": "reactflow__edge-30598d37-cf80-4e12-896a-7683cf727e77control-61613ab2-784d-4a5c-8576-18fd5da065efcontrol",
+ "type": "default"
+ },
+ {
+ "source": "b79732f8-6126-4d06-9c8a-4e84bd5b1ac9",
+ "sourceHandle": "vae",
+ "target": "c2172a8b-1b5f-4330-acbe-dd2565c3b988",
+ "targetHandle": "vae",
+ "id": "reactflow__edge-b79732f8-6126-4d06-9c8a-4e84bd5b1ac9vae-c2172a8b-1b5f-4330-acbe-dd2565c3b988vae",
+ "type": "default"
+ },
+ {
+ "source": "08c41d50-fb10-43a9-a58b-fc15ee678a83",
+ "sourceHandle": "image",
+ "target": "b10d39ab-7bc9-48bc-b883-2fd50920876d",
+ "targetHandle": "image",
+ "id": "reactflow__edge-08c41d50-fb10-43a9-a58b-fc15ee678a83image-b10d39ab-7bc9-48bc-b883-2fd50920876dimage",
+ "type": "default"
+ },
+ {
+ "source": "b10d39ab-7bc9-48bc-b883-2fd50920876d",
+ "sourceHandle": "image",
+ "target": "30598d37-cf80-4e12-896a-7683cf727e77",
+ "targetHandle": "image",
+ "id": "reactflow__edge-b10d39ab-7bc9-48bc-b883-2fd50920876dimage-30598d37-cf80-4e12-896a-7683cf727e77image",
+ "type": "default"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/docs/workflows/SDXL_Text_to_Image.json b/docs/workflows/SDXL_Text_to_Image.json
new file mode 100644
index 0000000000..57f95293eb
--- /dev/null
+++ b/docs/workflows/SDXL_Text_to_Image.json
@@ -0,0 +1,735 @@
+{
+ "name": "SDXL Text to Image",
+ "author": "InvokeAI",
+ "description": "Sample text to image workflow for SDXL",
+ "version": "1.0.1",
+ "contact": "invoke@invoke.ai",
+ "tags": "text2image, SDXL, default",
+ "notes": "",
+ "exposedFields": [
+ {
+ "nodeId": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "fieldName": "model"
+ },
+ {
+ "nodeId": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "fieldName": "style"
+ },
+ {
+ "nodeId": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "fieldName": "style"
+ },
+ {
+ "nodeId": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "fieldName": "steps"
+ }
+ ],
+ "meta": {
+ "version": "1.0.0"
+ },
+ "nodes": [
+ {
+ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "type": "sdxl_compel_prompt",
+ "inputs": {
+ "prompt": {
+ "id": "5a6889e6-95cb-462f-8f4a-6b93ae7afaec",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Prompt",
+ "value": ""
+ },
+ "style": {
+ "id": "f240d0e6-3a1c-4320-af23-20ebb707c276",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "05af07b0-99a0-4a68-8ad2-697bbdb7fc7e",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "2c771996-a998-43b7-9dd3-3792664d4e5b",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "66519dca-a151-4e3e-ae1f-88f1f9877bde",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "349cf2e9-f3d0-4e16-9ae2-7097d25b6a51",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "target_width": {
+ "id": "44499347-7bd6-4a73-99d6-5a982786db05",
+ "name": "target_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "target_height": {
+ "id": "fda359b0-ab80-4f3c-805b-c9f61319d7d2",
+ "name": "target_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "clip": {
+ "id": "b447adaf-a649-4a76-a827-046a9fc8d89b",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "clip2": {
+ "id": "86ee4e32-08f9-4baa-9163-31d93f5c0187",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "7c10118e-7b4e-4911-b98e-d3ba6347dfd0",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Negative Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 764,
+ "position": {
+ "x": 1275,
+ "y": -350
+ }
+ },
+ {
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "noise",
+ "inputs": {
+ "seed": {
+ "id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
+ "name": "seed",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "width": {
+ "id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "height": {
+ "id": "16298330-e2bf-4872-a514-d6923df53cbb",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "use_cpu": {
+ "id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
+ "name": "use_cpu",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ }
+ },
+ "outputs": {
+ "noise": {
+ "id": "50f650dc-0184-4e23-a927-0497a96fe954",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1650,
+ "y": -300
+ }
+ },
+ {
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "l2i",
+ "inputs": {
+ "tiled": {
+ "id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df",
+ "name": "tiled",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "fp32": {
+ "id": "b146d873-ffb9-4767-986a-5360504841a2",
+ "name": "fp32",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ },
+ "latents": {
+ "id": "65441abd-7713-4b00-9d8d-3771404002e8",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "vae": {
+ "id": "a478b833-6e13-4611-9a10-842c89603c74",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "c87ae925-f858-417a-8940-8708ba9b4b53",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "23e41c00-a354-48e8-8f59-5875679c27ab",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": true,
+ "isIntermediate": false
+ },
+ "width": 320,
+ "height": 224,
+ "position": {
+ "x": 2025,
+ "y": -250
+ }
+ },
+ {
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "rand_int",
+ "inputs": {
+ "low": {
+ "id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
+ "name": "low",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "high": {
+ "id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
+ "name": "high",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 2147483647
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
+ "name": "value",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Random Seed",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1650,
+ "y": -350
+ }
+ },
+ {
+ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "type": "sdxl_model_loader",
+ "inputs": {
+ "model": {
+ "id": "39f9e799-bc95-4318-a200-30eed9e60c42",
+ "name": "model",
+ "type": "SDXLMainModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "stable-diffusion-xl-base-1.0",
+ "base_model": "sdxl",
+ "model_type": "main"
+ }
+ }
+ },
+ "outputs": {
+ "unet": {
+ "id": "2626a45e-59aa-4609-b131-2d45c5eaed69",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "output"
+ },
+ "clip": {
+ "id": "7c9c42fa-93d5-4639-ab8b-c4d9b0559baf",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "clip2": {
+ "id": "0dafddcf-a472-49c1-a47c-7b8fab4c8bc9",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "vae": {
+ "id": "ee6a6997-1b3c-4ff3-99ce-1e7bfba2750c",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 234,
+ "position": {
+ "x": 475,
+ "y": 25
+ }
+ },
+ {
+ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "type": "sdxl_compel_prompt",
+ "inputs": {
+ "prompt": {
+ "id": "5a6889e6-95cb-462f-8f4a-6b93ae7afaec",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Prompt",
+ "value": ""
+ },
+ "style": {
+ "id": "f240d0e6-3a1c-4320-af23-20ebb707c276",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "05af07b0-99a0-4a68-8ad2-697bbdb7fc7e",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "2c771996-a998-43b7-9dd3-3792664d4e5b",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "66519dca-a151-4e3e-ae1f-88f1f9877bde",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "349cf2e9-f3d0-4e16-9ae2-7097d25b6a51",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "target_width": {
+ "id": "44499347-7bd6-4a73-99d6-5a982786db05",
+ "name": "target_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "target_height": {
+ "id": "fda359b0-ab80-4f3c-805b-c9f61319d7d2",
+ "name": "target_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "clip": {
+ "id": "b447adaf-a649-4a76-a827-046a9fc8d89b",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "clip2": {
+ "id": "86ee4e32-08f9-4baa-9163-31d93f5c0187",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "7c10118e-7b4e-4911-b98e-d3ba6347dfd0",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Positive Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 764,
+ "position": {
+ "x": 900,
+ "y": -350
+ }
+ },
+ {
+ "id": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "type": "denoise_latents",
+ "inputs": {
+ "noise": {
+ "id": "4884a4b7-cc19-4fea-83c7-1f940e6edd24",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "steps": {
+ "id": "4c61675c-b6b9-41ac-b187-b5c13b587039",
+ "name": "steps",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 36
+ },
+ "cfg_scale": {
+ "id": "f8213f35-4637-4a1a-83f4-1f8cfb9ccd2c",
+ "name": "cfg_scale",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 7.5
+ },
+ "denoising_start": {
+ "id": "01e2f30d-0acd-4e21-98b9-a9b8e24c6db2",
+ "name": "denoising_start",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "denoising_end": {
+ "id": "3db95479-a73b-4c75-9b44-08daec16b224",
+ "name": "denoising_end",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "scheduler": {
+ "id": "db8430a9-64c3-4c54-ae38-9f597cf7b6d5",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler"
+ },
+ "control": {
+ "id": "599b49e8-6435-4576-be41-a5155f3a17e3",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "latents": {
+ "id": "226f9e91-454e-4159-9fa6-019c0cf29277",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "denoise_mask": {
+ "id": "de019cb6-7fb5-45bf-a266-22e20889893f",
+ "name": "denoise_mask",
+ "type": "DenoiseMaskField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "positive_conditioning": {
+ "id": "02fc400a-110d-470e-8411-f404f966a949",
+ "name": "positive_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "negative_conditioning": {
+ "id": "4bd3bdfa-fcf4-42be-8e47-1e314255798f",
+ "name": "negative_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "unet": {
+ "id": "7c2d58a8-b5f1-4e63-8ffd-8ada52c35832",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "6a6fa492-de26-4e95-b1d9-a322fe37eb13",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "a9790729-7d6c-4418-903d-4da961fccf56",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "fa74efe5-7330-4a3c-b256-c82a544585b4",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 558,
+ "position": {
+ "x": 1650,
+ "y": -250
+ }
+ }
+ ],
+ "edges": [
+ {
+ "source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "target": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2-55705012-79b9-4aac-9f26-c0b10309785b-collapsed",
+ "type": "collapsed"
+ },
+ {
+ "source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "sourceHandle": "value",
+ "target": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "targetHandle": "seed",
+ "id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip2",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip2",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "vae",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "vae",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae",
+ "type": "default"
+ },
+ {
+ "source": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "sourceHandle": "latents",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-87ee6243-fb0d-4f77-ad5f-56591659339elatents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents",
+ "type": "default"
+ },
+ {
+ "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "sourceHandle": "conditioning",
+ "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "targetHandle": "positive_conditioning",
+ "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-87ee6243-fb0d-4f77-ad5f-56591659339epositive_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "sourceHandle": "conditioning",
+ "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "targetHandle": "negative_conditioning",
+ "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-87ee6243-fb0d-4f77-ad5f-56591659339enegative_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "unet",
+ "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "targetHandle": "unet",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-87ee6243-fb0d-4f77-ad5f-56591659339eunet",
+ "type": "default"
+ },
+ {
+ "source": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "sourceHandle": "noise",
+ "target": "87ee6243-fb0d-4f77-ad5f-56591659339e",
+ "targetHandle": "noise",
+ "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-87ee6243-fb0d-4f77-ad5f-56591659339enoise",
+ "type": "default"
+ }
+ ]
+}
diff --git a/docs/workflows/SDXL_w_Refiner_Text_to_Image.json b/docs/workflows/SDXL_w_Refiner_Text_to_Image.json
new file mode 100644
index 0000000000..22ffb8262d
--- /dev/null
+++ b/docs/workflows/SDXL_w_Refiner_Text_to_Image.json
@@ -0,0 +1,1404 @@
+{
+ "name": "SDXL w/Refiner Text to Image",
+ "author": "InvokeAI",
+ "description": "Sample text to image workflow for SDXL with the refiner",
+ "version": "1.0.1",
+ "contact": "invoke@invoke.ai",
+ "tags": "text2image, SDXL, default, refiner",
+ "notes": "",
+ "exposedFields": [
+ {
+ "nodeId": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "fieldName": "model"
+ },
+ {
+ "nodeId": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "fieldName": "steps"
+ },
+ {
+ "nodeId": "06a30867-1e9d-461f-bd58-14a63cc997dd",
+ "fieldName": "scheduler"
+ },
+ {
+ "nodeId": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "fieldName": "model"
+ },
+ {
+ "nodeId": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "fieldName": "steps"
+ },
+ {
+ "nodeId": "b2b35add-929d-4538-aecb-02c661768b29",
+ "fieldName": "value"
+ },
+ {
+ "nodeId": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8",
+ "fieldName": "value"
+ },
+ {
+ "nodeId": "5639e3bc-b769-4ae5-9262-72db703c5a7b",
+ "fieldName": "value"
+ },
+ {
+ "nodeId": "8d54b9db-3662-43af-8369-9a277e063f3b",
+ "fieldName": "value"
+ }
+ ],
+ "meta": {
+ "version": "1.0.0"
+ },
+ "nodes": [
+ {
+ "id": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "type": "denoise_latents",
+ "inputs": {
+ "noise": {
+ "id": "962fb1ba-341c-441c-940b-1543caafab29",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "steps": {
+ "id": "2b76247b-cc60-4ef0-8a51-290700590805",
+ "name": "steps",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "Refiner Steps",
+ "value": 36
+ },
+ "cfg_scale": {
+ "id": "f13c5cf5-6198-4183-9b47-0a44c5666a2a",
+ "name": "cfg_scale",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 7.5
+ },
+ "denoising_start": {
+ "id": "397bb49d-7d00-465b-a918-456910d7fedb",
+ "name": "denoising_start",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0.8
+ },
+ "denoising_end": {
+ "id": "dac6aa2a-d074-4e86-af0c-def573dd69ac",
+ "name": "denoising_end",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "scheduler": {
+ "id": "34f9f11c-f2fc-48b2-b015-ededbf2d000f",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler"
+ },
+ "control": {
+ "id": "80c69321-e712-453b-b8a8-b4e03d37844c",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "latents": {
+ "id": "8122d26c-ad2f-4f65-93d5-9ebb426bdba4",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "denoise_mask": {
+ "id": "5dc048d6-28c3-4db4-9e8b-652006616c17",
+ "name": "denoise_mask",
+ "type": "DenoiseMaskField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "positive_conditioning": {
+ "id": "dd5cab6f-6dbd-4791-a21a-ef0544f26f8f",
+ "name": "positive_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "negative_conditioning": {
+ "id": "5efedcb9-3286-426a-ad57-f77b2d7d1898",
+ "name": "negative_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "unet": {
+ "id": "3ebc07f0-4cd7-4f4d-a5b3-a8ce13383305",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "ca9c565a-1dda-428c-9fdf-7c51eb7fa9c5",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "d91b7cbd-fe5c-4c92-923e-241d1a63648c",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "af9ee999-a666-42a8-8e5c-d04518c4aa8e",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 558,
+ "position": {
+ "x": 1650,
+ "y": -150
+ }
+ },
+ {
+ "id": "b2b35add-929d-4538-aecb-02c661768b29",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "b2b35add-929d-4538-aecb-02c661768b29",
+ "type": "string",
+ "inputs": {
+ "value": {
+ "id": "89854c84-cbc1-4b60-921d-4bade11cab66",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Prompt",
+ "value": "super cute tiger"
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "3617404e-e483-4e2e-8550-7080a1ef283f",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Positive Prompt",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": -75
+ }
+ },
+ {
+ "id": "8d54b9db-3662-43af-8369-9a277e063f3b",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "8d54b9db-3662-43af-8369-9a277e063f3b",
+ "type": "string",
+ "inputs": {
+ "value": {
+ "id": "89854c84-cbc1-4b60-921d-4bade11cab66",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Style",
+ "value": ""
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "3617404e-e483-4e2e-8550-7080a1ef283f",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Negative Style",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": 75
+ }
+ },
+ {
+ "id": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8",
+ "type": "string",
+ "inputs": {
+ "value": {
+ "id": "89854c84-cbc1-4b60-921d-4bade11cab66",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Style",
+ "value": ""
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "3617404e-e483-4e2e-8550-7080a1ef283f",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Postive Style",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": -25
+ }
+ },
+ {
+ "id": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1",
+ "type": "sdxl_refiner_compel_prompt",
+ "inputs": {
+ "style": {
+ "id": "c6f91ecf-370f-44d0-8243-63724e75510a",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "956f6eca-8324-41eb-a8dd-fa9b34164ca6",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "a41bb3a1-7664-4dac-b6ae-6f4dff3828a9",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "81936e19-0ae7-4006-9d7c-e359fc7c7d15",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "be94ddb8-88cc-4d6b-a2c0-f2b43143bfa1",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "aesthetic_score": {
+ "id": "60f380de-87b4-4535-b3ef-545a6e57283e",
+ "name": "aesthetic_score",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 6
+ },
+ "clip2": {
+ "id": "773c4054-c005-46ad-92c4-5c1fa4506041",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "63e16bfa-59d8-4d6f-abda-ad979b26adb5",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Refiner Negative Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 520,
+ "position": {
+ "x": 1625,
+ "y": -925
+ }
+ },
+ {
+ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "type": "sdxl_compel_prompt",
+ "inputs": {
+ "prompt": {
+ "id": "5a6889e6-95cb-462f-8f4a-6b93ae7afaec",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Prompt",
+ "value": ""
+ },
+ "style": {
+ "id": "f240d0e6-3a1c-4320-af23-20ebb707c276",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "05af07b0-99a0-4a68-8ad2-697bbdb7fc7e",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "2c771996-a998-43b7-9dd3-3792664d4e5b",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "66519dca-a151-4e3e-ae1f-88f1f9877bde",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "349cf2e9-f3d0-4e16-9ae2-7097d25b6a51",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "target_width": {
+ "id": "44499347-7bd6-4a73-99d6-5a982786db05",
+ "name": "target_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "target_height": {
+ "id": "fda359b0-ab80-4f3c-805b-c9f61319d7d2",
+ "name": "target_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "clip": {
+ "id": "b447adaf-a649-4a76-a827-046a9fc8d89b",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "clip2": {
+ "id": "86ee4e32-08f9-4baa-9163-31d93f5c0187",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "7c10118e-7b4e-4911-b98e-d3ba6347dfd0",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Negative Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 764,
+ "position": {
+ "x": 900,
+ "y": -925
+ }
+ },
+ {
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "noise",
+ "inputs": {
+ "seed": {
+ "id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
+ "name": "seed",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "width": {
+ "id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "height": {
+ "id": "16298330-e2bf-4872-a514-d6923df53cbb",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "use_cpu": {
+ "id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
+ "name": "use_cpu",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ }
+ },
+ "outputs": {
+ "noise": {
+ "id": "50f650dc-0184-4e23-a927-0497a96fe954",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1275,
+ "y": -200
+ }
+ },
+ {
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "l2i",
+ "inputs": {
+ "tiled": {
+ "id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df",
+ "name": "tiled",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "fp32": {
+ "id": "b146d873-ffb9-4767-986a-5360504841a2",
+ "name": "fp32",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ },
+ "latents": {
+ "id": "65441abd-7713-4b00-9d8d-3771404002e8",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "vae": {
+ "id": "a478b833-6e13-4611-9a10-842c89603c74",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "c87ae925-f858-417a-8940-8708ba9b4b53",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "23e41c00-a354-48e8-8f59-5875679c27ab",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": true,
+ "isIntermediate": false
+ },
+ "width": 320,
+ "height": 266,
+ "position": {
+ "x": 2075,
+ "y": -400
+ }
+ },
+ {
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "rand_int",
+ "inputs": {
+ "low": {
+ "id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
+ "name": "low",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "high": {
+ "id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
+ "name": "high",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 2147483647
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
+ "name": "value",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Random Seed",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1275,
+ "y": -250
+ }
+ },
+ {
+ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "type": "sdxl_model_loader",
+ "inputs": {
+ "model": {
+ "id": "39f9e799-bc95-4318-a200-30eed9e60c42",
+ "name": "model",
+ "type": "SDXLMainModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "stable-diffusion-xl-base-1.0",
+ "base_model": "sdxl",
+ "model_type": "main"
+ }
+ }
+ },
+ "outputs": {
+ "unet": {
+ "id": "2626a45e-59aa-4609-b131-2d45c5eaed69",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "output"
+ },
+ "clip": {
+ "id": "7c9c42fa-93d5-4639-ab8b-c4d9b0559baf",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "clip2": {
+ "id": "0dafddcf-a472-49c1-a47c-7b8fab4c8bc9",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "vae": {
+ "id": "ee6a6997-1b3c-4ff3-99ce-1e7bfba2750c",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": 175
+ }
+ },
+ {
+ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "type": "sdxl_compel_prompt",
+ "inputs": {
+ "prompt": {
+ "id": "5a6889e6-95cb-462f-8f4a-6b93ae7afaec",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Prompt",
+ "value": ""
+ },
+ "style": {
+ "id": "f240d0e6-3a1c-4320-af23-20ebb707c276",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "05af07b0-99a0-4a68-8ad2-697bbdb7fc7e",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "2c771996-a998-43b7-9dd3-3792664d4e5b",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "66519dca-a151-4e3e-ae1f-88f1f9877bde",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "349cf2e9-f3d0-4e16-9ae2-7097d25b6a51",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "target_width": {
+ "id": "44499347-7bd6-4a73-99d6-5a982786db05",
+ "name": "target_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "target_height": {
+ "id": "fda359b0-ab80-4f3c-805b-c9f61319d7d2",
+ "name": "target_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "clip": {
+ "id": "b447adaf-a649-4a76-a827-046a9fc8d89b",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "clip2": {
+ "id": "86ee4e32-08f9-4baa-9163-31d93f5c0187",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "7c10118e-7b4e-4911-b98e-d3ba6347dfd0",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Positive Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 764,
+ "position": {
+ "x": 550,
+ "y": -925
+ }
+ },
+ {
+ "id": "f0e06b70-9f53-44e3-8f5f-63d813b6b579",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "f0e06b70-9f53-44e3-8f5f-63d813b6b579",
+ "type": "sdxl_refiner_compel_prompt",
+ "inputs": {
+ "style": {
+ "id": "c6f91ecf-370f-44d0-8243-63724e75510a",
+ "name": "style",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Style",
+ "value": ""
+ },
+ "original_width": {
+ "id": "956f6eca-8324-41eb-a8dd-fa9b34164ca6",
+ "name": "original_width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "original_height": {
+ "id": "a41bb3a1-7664-4dac-b6ae-6f4dff3828a9",
+ "name": "original_height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1024
+ },
+ "crop_top": {
+ "id": "81936e19-0ae7-4006-9d7c-e359fc7c7d15",
+ "name": "crop_top",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "crop_left": {
+ "id": "be94ddb8-88cc-4d6b-a2c0-f2b43143bfa1",
+ "name": "crop_left",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "aesthetic_score": {
+ "id": "60f380de-87b4-4535-b3ef-545a6e57283e",
+ "name": "aesthetic_score",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 6
+ },
+ "clip2": {
+ "id": "773c4054-c005-46ad-92c4-5c1fa4506041",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "63e16bfa-59d8-4d6f-abda-ad979b26adb5",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "SDXL Refiner Positive Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 520,
+ "position": {
+ "x": 1275,
+ "y": -925
+ }
+ },
+ {
+ "id": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "type": "sdxl_refiner_model_loader",
+ "inputs": {
+ "model": {
+ "id": "2f09431c-096b-4848-b580-b37be773839d",
+ "name": "model",
+ "type": "SDXLRefinerModelField",
+ "fieldKind": "input",
+ "label": "Refiner Model",
+ "value": {
+ "model_name": "stable-diffusion-xl-refiner-1.0",
+ "base_model": "sdxl-refiner",
+ "model_type": "main"
+ }
+ }
+ },
+ "outputs": {
+ "unet": {
+ "id": "c06b335a-7f65-4165-9ca2-40107eb9c85b",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "output"
+ },
+ "clip2": {
+ "id": "81ec105e-cc1f-4b78-a5bb-8df3a4dd2574",
+ "name": "clip2",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "vae": {
+ "id": "f516feab-873d-47ec-a946-b8f15eed4bed",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": 225
+ }
+ },
+ {
+ "id": "5639e3bc-b769-4ae5-9262-72db703c5a7b",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "5639e3bc-b769-4ae5-9262-72db703c5a7b",
+ "type": "string",
+ "inputs": {
+ "value": {
+ "id": "89854c84-cbc1-4b60-921d-4bade11cab66",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Prompt",
+ "value": ""
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "3617404e-e483-4e2e-8550-7080a1ef283f",
+ "name": "value",
+ "type": "string",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Negative Prompt",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": 25
+ }
+ },
+ {
+ "id": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "type": "denoise_latents",
+ "inputs": {
+ "noise": {
+ "id": "962fb1ba-341c-441c-940b-1543caafab29",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "steps": {
+ "id": "2b76247b-cc60-4ef0-8a51-290700590805",
+ "name": "steps",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 36
+ },
+ "cfg_scale": {
+ "id": "f13c5cf5-6198-4183-9b47-0a44c5666a2a",
+ "name": "cfg_scale",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 7.5
+ },
+ "denoising_start": {
+ "id": "397bb49d-7d00-465b-a918-456910d7fedb",
+ "name": "denoising_start",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "denoising_end": {
+ "id": "dac6aa2a-d074-4e86-af0c-def573dd69ac",
+ "name": "denoising_end",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0.8
+ },
+ "scheduler": {
+ "id": "34f9f11c-f2fc-48b2-b015-ededbf2d000f",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler"
+ },
+ "control": {
+ "id": "80c69321-e712-453b-b8a8-b4e03d37844c",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "latents": {
+ "id": "8122d26c-ad2f-4f65-93d5-9ebb426bdba4",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "denoise_mask": {
+ "id": "5dc048d6-28c3-4db4-9e8b-652006616c17",
+ "name": "denoise_mask",
+ "type": "DenoiseMaskField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "positive_conditioning": {
+ "id": "dd5cab6f-6dbd-4791-a21a-ef0544f26f8f",
+ "name": "positive_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "negative_conditioning": {
+ "id": "5efedcb9-3286-426a-ad57-f77b2d7d1898",
+ "name": "negative_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "unet": {
+ "id": "3ebc07f0-4cd7-4f4d-a5b3-a8ce13383305",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "ca9c565a-1dda-428c-9fdf-7c51eb7fa9c5",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "d91b7cbd-fe5c-4c92-923e-241d1a63648c",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "af9ee999-a666-42a8-8e5c-d04518c4aa8e",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 558,
+ "position": {
+ "x": 1275,
+ "y": -150
+ }
+ },
+ {
+ "id": "06a30867-1e9d-461f-bd58-14a63cc997dd",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "06a30867-1e9d-461f-bd58-14a63cc997dd",
+ "type": "scheduler",
+ "inputs": {
+ "scheduler": {
+ "id": "0be5a5a0-3388-41e1-a6c4-10d414d8fe5b",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler"
+ }
+ },
+ "outputs": {
+ "scheduler": {
+ "id": "cafdef9d-61cd-4f43-be91-356a1d65afca",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 700,
+ "y": 125
+ }
+ }
+ ],
+ "edges": [
+ {
+ "source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "target": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2-55705012-79b9-4aac-9f26-c0b10309785b-collapsed",
+ "type": "collapsed"
+ },
+ {
+ "source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "sourceHandle": "value",
+ "target": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "targetHandle": "seed",
+ "id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip2",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "clip2",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
+ "type": "default"
+ },
+ {
+ "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "sourceHandle": "clip2",
+ "target": "f0e06b70-9f53-44e3-8f5f-63d813b6b579",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0clip2-f0e06b70-9f53-44e3-8f5f-63d813b6b579clip2",
+ "type": "default"
+ },
+ {
+ "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "sourceHandle": "clip2",
+ "target": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1",
+ "targetHandle": "clip2",
+ "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0clip2-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1clip2",
+ "type": "default"
+ },
+ {
+ "source": "5639e3bc-b769-4ae5-9262-72db703c5a7b",
+ "sourceHandle": "value",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "prompt",
+ "id": "reactflow__edge-5639e3bc-b769-4ae5-9262-72db703c5a7bvalue-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204prompt",
+ "type": "default"
+ },
+ {
+ "source": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8",
+ "sourceHandle": "value",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "style",
+ "id": "reactflow__edge-f1a6a160-4c36-4902-8eeb-8b1c23e81bc8value-faf965a4-7530-427b-b1f3-4ba6505c2a08style",
+ "type": "default"
+ },
+ {
+ "source": "b2b35add-929d-4538-aecb-02c661768b29",
+ "sourceHandle": "value",
+ "target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "targetHandle": "prompt",
+ "id": "reactflow__edge-b2b35add-929d-4538-aecb-02c661768b29value-faf965a4-7530-427b-b1f3-4ba6505c2a08prompt",
+ "type": "default"
+ },
+ {
+ "source": "f1a6a160-4c36-4902-8eeb-8b1c23e81bc8",
+ "sourceHandle": "value",
+ "target": "f0e06b70-9f53-44e3-8f5f-63d813b6b579",
+ "targetHandle": "style",
+ "id": "reactflow__edge-f1a6a160-4c36-4902-8eeb-8b1c23e81bc8value-f0e06b70-9f53-44e3-8f5f-63d813b6b579style",
+ "type": "default"
+ },
+ {
+ "source": "8d54b9db-3662-43af-8369-9a277e063f3b",
+ "sourceHandle": "value",
+ "target": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1",
+ "targetHandle": "style",
+ "id": "reactflow__edge-8d54b9db-3662-43af-8369-9a277e063f3bvalue-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1style",
+ "type": "default"
+ },
+ {
+ "source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
+ "sourceHandle": "conditioning",
+ "target": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "targetHandle": "positive_conditioning",
+ "id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-65b56526-ef0a-4c1f-adda-1017c925b063positive_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "sourceHandle": "conditioning",
+ "target": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "targetHandle": "negative_conditioning",
+ "id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-65b56526-ef0a-4c1f-adda-1017c925b063negative_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "sourceHandle": "noise",
+ "target": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "targetHandle": "noise",
+ "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-65b56526-ef0a-4c1f-adda-1017c925b063noise",
+ "type": "default"
+ },
+ {
+ "source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
+ "sourceHandle": "unet",
+ "target": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "targetHandle": "unet",
+ "id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-65b56526-ef0a-4c1f-adda-1017c925b063unet",
+ "type": "default"
+ },
+ {
+ "source": "fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1",
+ "sourceHandle": "conditioning",
+ "target": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "targetHandle": "negative_conditioning",
+ "id": "reactflow__edge-fbb2f1a0-2e68-411d-a955-60c3b8a6f2d1conditioning-a9352523-613a-43e3-b97f-dade7ec317e5negative_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "f0e06b70-9f53-44e3-8f5f-63d813b6b579",
+ "sourceHandle": "conditioning",
+ "target": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "targetHandle": "positive_conditioning",
+ "id": "reactflow__edge-f0e06b70-9f53-44e3-8f5f-63d813b6b579conditioning-a9352523-613a-43e3-b97f-dade7ec317e5positive_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "sourceHandle": "latents",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-a9352523-613a-43e3-b97f-dade7ec317e5latents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents",
+ "type": "default"
+ },
+ {
+ "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "sourceHandle": "vae",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "vae",
+ "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae",
+ "type": "default"
+ },
+ {
+ "source": "62bdf243-d98f-4508-b6b5-c3af00ef49f0",
+ "sourceHandle": "unet",
+ "target": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "targetHandle": "unet",
+ "id": "reactflow__edge-62bdf243-d98f-4508-b6b5-c3af00ef49f0unet-a9352523-613a-43e3-b97f-dade7ec317e5unet",
+ "type": "default"
+ },
+ {
+ "source": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "sourceHandle": "latents",
+ "target": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-65b56526-ef0a-4c1f-adda-1017c925b063latents-a9352523-613a-43e3-b97f-dade7ec317e5latents",
+ "type": "default"
+ },
+ {
+ "source": "06a30867-1e9d-461f-bd58-14a63cc997dd",
+ "sourceHandle": "scheduler",
+ "target": "65b56526-ef0a-4c1f-adda-1017c925b063",
+ "targetHandle": "scheduler",
+ "id": "reactflow__edge-06a30867-1e9d-461f-bd58-14a63cc997ddscheduler-65b56526-ef0a-4c1f-adda-1017c925b063scheduler",
+ "type": "default"
+ },
+ {
+ "source": "06a30867-1e9d-461f-bd58-14a63cc997dd",
+ "sourceHandle": "scheduler",
+ "target": "a9352523-613a-43e3-b97f-dade7ec317e5",
+ "targetHandle": "scheduler",
+ "id": "reactflow__edge-06a30867-1e9d-461f-bd58-14a63cc997ddscheduler-a9352523-613a-43e3-b97f-dade7ec317e5scheduler",
+ "type": "default"
+ },
+ {
+ "source": "8d54b9db-3662-43af-8369-9a277e063f3b",
+ "sourceHandle": "value",
+ "target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
+ "targetHandle": "style",
+ "id": "reactflow__edge-8d54b9db-3662-43af-8369-9a277e063f3bvalue-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style",
+ "type": "default"
+ }
+ ]
+}
diff --git a/docs/workflows/Text_to_Image.json b/docs/workflows/Text_to_Image.json
new file mode 100644
index 0000000000..7239a2247f
--- /dev/null
+++ b/docs/workflows/Text_to_Image.json
@@ -0,0 +1,573 @@
+{
+ "name": "Text to Image",
+ "author": "InvokeAI",
+ "description": "Sample text to image workflow for Stable Diffusion 1.5/2",
+ "version": "1.0.1",
+ "contact": "invoke@invoke.ai",
+ "tags": "text2image, SD1.5, SD2, default",
+ "notes": "",
+ "exposedFields": [
+ {
+ "nodeId": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "fieldName": "model"
+ },
+ {
+ "nodeId": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
+ "fieldName": "prompt"
+ },
+ {
+ "nodeId": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "fieldName": "steps"
+ }
+ ],
+ "meta": {
+ "version": "1.0.0"
+ },
+ "nodes": [
+ {
+ "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
+ "type": "compel",
+ "inputs": {
+ "prompt": {
+ "id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Negative Prompt",
+ "value": ""
+ },
+ "clip": {
+ "id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Negative Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 235,
+ "position": {
+ "x": 1400,
+ "y": -75
+ }
+ },
+ {
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "type": "noise",
+ "inputs": {
+ "seed": {
+ "id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
+ "name": "seed",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "width": {
+ "id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 512
+ },
+ "height": {
+ "id": "16298330-e2bf-4872-a514-d6923df53cbb",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 512
+ },
+ "use_cpu": {
+ "id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
+ "name": "use_cpu",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": true
+ }
+ },
+ "outputs": {
+ "noise": {
+ "id": "50f650dc-0184-4e23-a927-0497a96fe954",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 364,
+ "position": {
+ "x": 1000,
+ "y": 350
+ }
+ },
+ {
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "type": "l2i",
+ "inputs": {
+ "tiled": {
+ "id": "24f5bc7b-f6a1-425d-8ab1-f50b4db5d0df",
+ "name": "tiled",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "fp32": {
+ "id": "b146d873-ffb9-4767-986a-5360504841a2",
+ "name": "fp32",
+ "type": "boolean",
+ "fieldKind": "input",
+ "label": "",
+ "value": false
+ },
+ "latents": {
+ "id": "65441abd-7713-4b00-9d8d-3771404002e8",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "vae": {
+ "id": "a478b833-6e13-4611-9a10-842c89603c74",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "image": {
+ "id": "c87ae925-f858-417a-8940-8708ba9b4b53",
+ "name": "image",
+ "type": "ImageField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "4bcb8512-b5a1-45f1-9e52-6e92849f9d6c",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "23e41c00-a354-48e8-8f59-5875679c27ab",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": true,
+ "isIntermediate": false
+ },
+ "width": 320,
+ "height": 266,
+ "position": {
+ "x": 1800,
+ "y": 200
+ }
+ },
+ {
+ "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "type": "main_model_loader",
+ "inputs": {
+ "model": {
+ "id": "993eabd2-40fd-44fe-bce7-5d0c7075ddab",
+ "name": "model",
+ "type": "MainModelField",
+ "fieldKind": "input",
+ "label": "",
+ "value": {
+ "model_name": "stable-diffusion-v1-5",
+ "base_model": "sd-1",
+ "model_type": "main"
+ }
+ }
+ },
+ "outputs": {
+ "unet": {
+ "id": "5c18c9db-328d-46d0-8cb9-143391c410be",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "output"
+ },
+ "clip": {
+ "id": "6effcac0-ec2f-4bf5-a49e-a2c29cf921f4",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "output"
+ },
+ "vae": {
+ "id": "57683ba3-f5f5-4f58-b9a2-4b83dacad4a1",
+ "name": "vae",
+ "type": "VaeField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1000,
+ "y": 200
+ }
+ },
+ {
+ "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
+ "type": "compel",
+ "inputs": {
+ "prompt": {
+ "id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
+ "name": "prompt",
+ "type": "string",
+ "fieldKind": "input",
+ "label": "Positive Prompt",
+ "value": ""
+ },
+ "clip": {
+ "id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
+ "name": "clip",
+ "type": "ClipField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "conditioning": {
+ "id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
+ "name": "conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Positive Compel Prompt",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 235,
+ "position": {
+ "x": 1000,
+ "y": -75
+ }
+ },
+ {
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "type": "rand_int",
+ "inputs": {
+ "low": {
+ "id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
+ "name": "low",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "high": {
+ "id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
+ "name": "high",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 2147483647
+ }
+ },
+ "outputs": {
+ "value": {
+ "id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
+ "name": "value",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "Random Seed",
+ "isOpen": false,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 32,
+ "position": {
+ "x": 1000,
+ "y": 275
+ }
+ },
+ {
+ "id": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "type": "invocation",
+ "data": {
+ "version": "1.0.0",
+ "id": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "type": "denoise_latents",
+ "inputs": {
+ "noise": {
+ "id": "8b18f3eb-40d2-45c1-9a9d-28d6af0dce2b",
+ "name": "noise",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "steps": {
+ "id": "0be4373c-46f3-441c-80a7-a4bb6ceb498c",
+ "name": "steps",
+ "type": "integer",
+ "fieldKind": "input",
+ "label": "",
+ "value": 36
+ },
+ "cfg_scale": {
+ "id": "107267ce-4666-4cd7-94b3-7476b7973ae9",
+ "name": "cfg_scale",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 7.5
+ },
+ "denoising_start": {
+ "id": "d2ce9f0f-5fc2-48b2-b917-53442941e9a1",
+ "name": "denoising_start",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 0
+ },
+ "denoising_end": {
+ "id": "8ad51505-b8d0-422a-beb8-96fc6fc6b65f",
+ "name": "denoising_end",
+ "type": "float",
+ "fieldKind": "input",
+ "label": "",
+ "value": 1
+ },
+ "scheduler": {
+ "id": "53092874-a43b-4623-91a2-76e62fdb1f2e",
+ "name": "scheduler",
+ "type": "Scheduler",
+ "fieldKind": "input",
+ "label": "",
+ "value": "euler"
+ },
+ "control": {
+ "id": "7abe57cc-469d-437e-ad72-a18efa28215f",
+ "name": "control",
+ "type": "ControlField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "latents": {
+ "id": "add8bbe5-14d0-42d4-a867-9c65ab8dd129",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "denoise_mask": {
+ "id": "f373a190-0fc8-45b7-ae62-c4aa8e9687e1",
+ "name": "denoise_mask",
+ "type": "DenoiseMaskField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "positive_conditioning": {
+ "id": "c7160303-8a23-4f15-9197-855d48802a7f",
+ "name": "positive_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "negative_conditioning": {
+ "id": "fd750efa-1dfc-4d0b-accb-828e905ba320",
+ "name": "negative_conditioning",
+ "type": "ConditioningField",
+ "fieldKind": "input",
+ "label": ""
+ },
+ "unet": {
+ "id": "af1f41ba-ce2a-4314-8d7f-494bb5800381",
+ "name": "unet",
+ "type": "UNetField",
+ "fieldKind": "input",
+ "label": ""
+ }
+ },
+ "outputs": {
+ "latents": {
+ "id": "8508d04d-f999-4a44-94d0-388ab1401d27",
+ "name": "latents",
+ "type": "LatentsField",
+ "fieldKind": "output"
+ },
+ "width": {
+ "id": "93dc8287-0a2a-4320-83a4-5e994b7ba23e",
+ "name": "width",
+ "type": "integer",
+ "fieldKind": "output"
+ },
+ "height": {
+ "id": "d9862f5c-0ab5-46fa-8c29-5059bb581d96",
+ "name": "height",
+ "type": "integer",
+ "fieldKind": "output"
+ }
+ },
+ "label": "",
+ "isOpen": true,
+ "notes": "",
+ "embedWorkflow": false,
+ "isIntermediate": true
+ },
+ "width": 320,
+ "height": 558,
+ "position": {
+ "x": 1400,
+ "y": 200
+ }
+ }
+ ],
+ "edges": [
+ {
+ "source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
+ "sourceHandle": "value",
+ "target": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "targetHandle": "seed",
+ "id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
+ "type": "default"
+ },
+ {
+ "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "sourceHandle": "clip",
+ "target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
+ "type": "default"
+ },
+ {
+ "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "sourceHandle": "clip",
+ "target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
+ "targetHandle": "clip",
+ "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
+ "type": "default"
+ },
+ {
+ "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "sourceHandle": "vae",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "vae",
+ "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-dbcd2f98-d809-48c8-bf64-2635f88a2fe9vae",
+ "type": "default"
+ },
+ {
+ "source": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "sourceHandle": "latents",
+ "target": "dbcd2f98-d809-48c8-bf64-2635f88a2fe9",
+ "targetHandle": "latents",
+ "id": "reactflow__edge-75899702-fa44-46d2-b2d5-3e17f234c3e7latents-dbcd2f98-d809-48c8-bf64-2635f88a2fe9latents",
+ "type": "default"
+ },
+ {
+ "source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
+ "sourceHandle": "conditioning",
+ "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "targetHandle": "positive_conditioning",
+ "id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7positive_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
+ "sourceHandle": "conditioning",
+ "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "targetHandle": "negative_conditioning",
+ "id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-75899702-fa44-46d2-b2d5-3e17f234c3e7negative_conditioning",
+ "type": "default"
+ },
+ {
+ "source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
+ "sourceHandle": "unet",
+ "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "targetHandle": "unet",
+ "id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-75899702-fa44-46d2-b2d5-3e17f234c3e7unet",
+ "type": "default"
+ },
+ {
+ "source": "55705012-79b9-4aac-9f26-c0b10309785b",
+ "sourceHandle": "noise",
+ "target": "75899702-fa44-46d2-b2d5-3e17f234c3e7",
+ "targetHandle": "noise",
+ "id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-75899702-fa44-46d2-b2d5-3e17f234c3e7noise",
+ "type": "default"
+ }
+ ]
+}
diff --git a/installer/create_installer.sh b/installer/create_installer.sh
index 134c7313ca..4e0771fecc 100755
--- a/installer/create_installer.sh
+++ b/installer/create_installer.sh
@@ -14,7 +14,7 @@ fi
VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
PATCH=""
VERSION="v${VERSION}${PATCH}"
-LATEST_TAG="v3.0-latest"
+LATEST_TAG="v3-latest"
echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
diff --git a/installer/lib/main.py b/installer/lib/main.py
index 5c9e6f8229..b3582f2fdc 100644
--- a/installer/lib/main.py
+++ b/installer/lib/main.py
@@ -5,6 +5,7 @@ InvokeAI Installer
import argparse
import os
from pathlib import Path
+
from installer import Installer
if __name__ == "__main__":
diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py
index 39642df19b..3a54280234 100644
--- a/invokeai/app/api/dependencies.py
+++ b/invokeai/app/api/dependencies.py
@@ -1,13 +1,9 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from logging import Logger
-from invokeai.app.services.board_image_record_storage import (
- SqliteBoardImageRecordStorage,
-)
-from invokeai.app.services.board_images import (
- BoardImagesService,
- BoardImagesServiceDependencies,
-)
+
+from invokeai.app.services.board_image_record_storage import SqliteBoardImageRecordStorage
+from invokeai.app.services.board_images import BoardImagesService, BoardImagesServiceDependencies
from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage
from invokeai.app.services.boards import BoardService, BoardServiceDependencies
from invokeai.app.services.config import InvokeAIAppConfig
@@ -19,16 +15,16 @@ from invokeai.backend.util.logging import InvokeAILogger
from invokeai.version.invokeai_version import __version__
from ..services.default_graphs import create_system_graphs
-from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
from ..services.graph import GraphExecutionState, LibraryGraph
from ..services.image_file_storage import DiskImageFileStorage
from ..services.invocation_queue import MemoryInvocationQueue
from ..services.invocation_services import InvocationServices
+from ..services.invocation_stats import InvocationStatsService
from ..services.invoker import Invoker
+from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
+from ..services.model_manager_service import ModelManagerService
from ..services.processor import DefaultInvocationProcessor
from ..services.sqlite import SqliteItemStorage
-from ..services.model_manager_service import ModelManagerService
-from ..services.invocation_stats import InvocationStatsService
from .events import FastAPIEventService
diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py
index 72ce94e9cf..7e798a3658 100644
--- a/invokeai/app/api/routers/images.py
+++ b/invokeai/app/api/routers/images.py
@@ -1,20 +1,17 @@
import io
from typing import Optional
-from PIL import Image
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
+from PIL import Image
from pydantic import BaseModel, Field
from invokeai.app.invocations.metadata import ImageMetadata
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
-from invokeai.app.services.models.image_record import (
- ImageDTO,
- ImageRecordChanges,
- ImageUrlsDTO,
-)
+from invokeai.app.services.models.image_record import ImageDTO, ImageRecordChanges, ImageUrlsDTO
+
from ..dependencies import ApiDependencies
images_router = APIRouter(prefix="/v1/images", tags=["images"])
diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py
index b6c1edbbe1..276eda902d 100644
--- a/invokeai/app/api/routers/models.py
+++ b/invokeai/app/api/routers/models.py
@@ -2,7 +2,7 @@
import pathlib
-from typing import Literal, List, Optional, Union
+from typing import List, Literal, Optional, Union
from fastapi import Body, Path, Query, Response
from fastapi.routing import APIRouter
@@ -10,13 +10,13 @@ from pydantic import BaseModel, parse_obj_as
from starlette.exceptions import HTTPException
from invokeai.backend import BaseModelType, ModelType
+from invokeai.backend.model_management import MergeInterpolationMethod
from invokeai.backend.model_management.models import (
OPENAPI_MODEL_CONFIGS,
- SchedulerPredictionType,
- ModelNotFoundException,
InvalidModelException,
+ ModelNotFoundException,
+ SchedulerPredictionType,
)
-from invokeai.backend.model_management import MergeInterpolationMethod
from ..dependencies import ApiDependencies
diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py
index d5624048e8..e950e46e9f 100644
--- a/invokeai/app/api/routers/sessions.py
+++ b/invokeai/app/api/routers/sessions.py
@@ -9,13 +9,7 @@ from pydantic.fields import Field
# Importing * is bad karma but needed here for node detection
from ...invocations import * # noqa: F401 F403
from ...invocations.baseinvocation import BaseInvocation
-from ...services.graph import (
- Edge,
- EdgeConnection,
- Graph,
- GraphExecutionState,
- NodeAlreadyExecutedError,
-)
+from ...services.graph import Edge, EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError
from ...services.item_storage import PaginatedResults
from ..dependencies import ApiDependencies
diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py
index 64ea6034fc..b000abcf6a 100644
--- a/invokeai/app/cli/commands.py
+++ b/invokeai/app/cli/commands.py
@@ -1,16 +1,18 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
-from abc import ABC, abstractmethod
import argparse
+from abc import ABC, abstractmethod
from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints
-from pydantic import BaseModel, Field
-import networkx as nx
+
import matplotlib.pyplot as plt
+import networkx as nx
+from pydantic import BaseModel, Field
import invokeai.backend.util.logging as logger
+
from ..invocations.baseinvocation import BaseInvocation
from ..invocations.image import ImageField
-from ..services.graph import GraphExecutionState, LibraryGraph, Edge
+from ..services.graph import Edge, GraphExecutionState, LibraryGraph
from ..services.invoker import Invoker
diff --git a/invokeai/app/cli/completer.py b/invokeai/app/cli/completer.py
index 086d8713d7..5aece8a058 100644
--- a/invokeai/app/cli/completer.py
+++ b/invokeai/app/cli/completer.py
@@ -6,15 +6,15 @@ completer object.
import atexit
import readline
import shlex
-
from pathlib import Path
-from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
+from typing import Dict, List, Literal, get_args, get_origin, get_type_hints
import invokeai.backend.util.logging as logger
+
from ...backend import ModelManager
from ..invocations.baseinvocation import BaseInvocation
-from .commands import BaseCommand
from ..services.invocation_services import InvocationServices
+from .commands import BaseCommand
# singleton object, class variable
completer = None
diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index 9084a7bf48..9fbd920dd2 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -3,10 +3,10 @@
from __future__ import annotations
import json
+import re
from abc import ABC, abstractmethod
from enum import Enum
from inspect import signature
-import re
from typing import (
TYPE_CHECKING,
AbstractSet,
@@ -23,10 +23,10 @@ from typing import (
get_type_hints,
)
-from pydantic import BaseModel, Field, validator
-from pydantic.fields import Undefined, ModelField
-from pydantic.typing import NoArgAnyCallable
import semver
+from pydantic import BaseModel, Field, validator
+from pydantic.fields import ModelField, Undefined
+from pydantic.typing import NoArgAnyCallable
from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig
@@ -198,6 +198,7 @@ class _InputField(BaseModel):
ui_type: Optional[UIType]
ui_component: Optional[UIComponent]
ui_order: Optional[int]
+ ui_choice_labels: Optional[dict[str, str]]
item_default: Optional[Any]
@@ -246,6 +247,7 @@ def InputField(
ui_component: Optional[UIComponent] = None,
ui_hidden: bool = False,
ui_order: Optional[int] = None,
+ ui_choice_labels: Optional[dict[str, str]] = None,
item_default: Optional[Any] = None,
**kwargs: Any,
) -> Any:
@@ -312,6 +314,7 @@ def InputField(
ui_hidden=ui_hidden,
ui_order=ui_order,
item_default=item_default,
+ ui_choice_labels=ui_choice_labels,
**kwargs,
)
diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py
index 2814a9c3ca..702eb99831 100644
--- a/invokeai/app/invocations/collections.py
+++ b/invokeai/app/invocations/collections.py
@@ -38,14 +38,16 @@ class RangeInvocation(BaseInvocation):
version="1.0.0",
)
class RangeOfSizeInvocation(BaseInvocation):
- """Creates a range from start to start + size with step"""
+ """Creates a range from start to start + (size * step) incremented by step"""
start: int = InputField(default=0, description="The start of the range")
- size: int = InputField(default=1, description="The number of values")
+ size: int = InputField(default=1, gt=0, description="The number of values")
step: int = InputField(default=1, description="The step of the range")
def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
- return IntegerCollectionOutput(collection=list(range(self.start, self.start + self.size, self.step)))
+ return IntegerCollectionOutput(
+ collection=list(range(self.start, self.start + (self.step * self.size), self.step))
+ )
@invocation(
diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index 4557c57820..3fdbf9b6e9 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -5,16 +5,15 @@ from typing import List, Union
import torch
from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
-from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
+from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import (
BasicConditioningInfo,
SDXLConditioningInfo,
)
-from ...backend.model_management.models import ModelType
from ...backend.model_management.lora import ModelPatcher
-from ...backend.model_management.models import ModelNotFoundException
+from ...backend.model_management.models import ModelNotFoundException, ModelType
from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
from ...backend.util.devices import torch_dtype
from .baseinvocation import (
diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py
index 2c2eab9155..7f81c0bc7e 100644
--- a/invokeai/app/invocations/controlnet_image_processors.py
+++ b/invokeai/app/invocations/controlnet_image_processors.py
@@ -28,15 +28,14 @@ from pydantic import BaseModel, Field, validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
-
from ...backend.model_management import BaseModelType
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
- InputField,
Input,
+ InputField,
InvocationContext,
OutputField,
UIType,
@@ -44,7 +43,6 @@ from .baseinvocation import (
invocation_output,
)
-
CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
CONTROLNET_RESIZE_VALUES = Literal[
"just_resize",
diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py
index 411aff8234..cbe76091d6 100644
--- a/invokeai/app/invocations/cv.py
+++ b/invokeai/app/invocations/cv.py
@@ -4,9 +4,10 @@
import cv2 as cv
import numpy
from PIL import Image, ImageOps
-from invokeai.app.invocations.primitives import ImageField, ImageOutput
+from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.models.image import ImageCategory, ResourceOrigin
+
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index dd7f927b6f..fda7561679 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -98,7 +98,7 @@ class ImageCropInvocation(BaseInvocation):
)
-@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.0")
+@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.0.1")
class ImagePasteInvocation(BaseInvocation):
"""Pastes an image into another image."""
@@ -110,6 +110,7 @@ class ImagePasteInvocation(BaseInvocation):
)
x: int = InputField(default=0, description="The left x coordinate at which to paste the image")
y: int = InputField(default=0, description="The top y coordinate at which to paste the image")
+ crop: bool = InputField(default=False, description="Crop to base image dimensions")
def invoke(self, context: InvocationContext) -> ImageOutput:
base_image = context.services.images.get_pil_image(self.base_image.image_name)
@@ -129,6 +130,10 @@ class ImagePasteInvocation(BaseInvocation):
new_image.paste(base_image, (abs(min_x), abs(min_y)))
new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)
+ if self.crop:
+ base_w, base_h = base_image.size
+ new_image = new_image.crop((abs(min_x), abs(min_y), abs(min_x) + base_w, abs(min_y) + base_h))
+
image_dto = context.services.images.create(
image=new_image,
image_origin=ResourceOrigin.INTERNAL,
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index ae62b4e0be..6a07891a74 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -34,6 +34,22 @@ from invokeai.app.invocations.primitives import (
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.models import ModelType, SilenceWarnings
+
+from ...backend.model_management.lora import ModelPatcher
+from ...backend.model_management.models import BaseModelType
+from ...backend.model_management.seamless import set_seamless
+from ...backend.stable_diffusion import PipelineIntermediateState
+from ...backend.stable_diffusion.diffusers_pipeline import (
+ ConditioningData,
+ ControlNetData,
+ StableDiffusionGeneratorPipeline,
+ image_resized_to_grid_as_tensor,
+)
+from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
+from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
+from ...backend.util.devices import choose_precision, choose_torch_device
+from ...backend.util.logging import InvokeAILogger
+from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
@@ -49,21 +65,6 @@ from .baseinvocation import (
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField
-from ..models.image import ImageCategory, ResourceOrigin
-from ...backend.model_management import BaseModelType
-from ...backend.model_management.lora import ModelPatcher
-from ...backend.model_management.seamless import set_seamless
-from ...backend.stable_diffusion import PipelineIntermediateState
-from ...backend.stable_diffusion.diffusers_pipeline import (
- ConditioningData,
- ControlNetData,
- StableDiffusionGeneratorPipeline,
- image_resized_to_grid_as_tensor,
-)
-from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
-from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
-from ...backend.util.devices import choose_precision, choose_torch_device
-from ...backend.util.logging import InvokeAILogger
DEFAULT_PRECISION = choose_precision(choose_torch_device())
diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py
index 0bc8b7b950..ac15146478 100644
--- a/invokeai/app/invocations/math.py
+++ b/invokeai/app/invocations/math.py
@@ -1,8 +1,11 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
-import numpy as np
+from typing import Literal
-from invokeai.app.invocations.primitives import IntegerOutput
+import numpy as np
+from pydantic import validator
+
+from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
@@ -60,3 +63,201 @@ class RandomIntInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> IntegerOutput:
return IntegerOutput(value=np.random.randint(self.low, self.high))
+
+
+@invocation(
+ "float_to_int",
+ title="Float To Integer",
+ tags=["math", "round", "integer", "float", "convert"],
+ category="math",
+ version="1.0.0",
+)
+class FloatToIntegerInvocation(BaseInvocation):
+ """Rounds a float number to (a multiple of) an integer."""
+
+ value: float = InputField(default=0, description="The value to round")
+ multiple: int = InputField(default=1, ge=1, title="Multiple of", description="The multiple to round to")
+ method: Literal["Nearest", "Floor", "Ceiling", "Truncate"] = InputField(
+ default="Nearest", description="The method to use for rounding"
+ )
+
+ def invoke(self, context: InvocationContext) -> IntegerOutput:
+ if self.method == "Nearest":
+ return IntegerOutput(value=round(self.value / self.multiple) * self.multiple)
+ elif self.method == "Floor":
+ return IntegerOutput(value=np.floor(self.value / self.multiple) * self.multiple)
+ elif self.method == "Ceiling":
+ return IntegerOutput(value=np.ceil(self.value / self.multiple) * self.multiple)
+ else: # self.method == "Truncate"
+ return IntegerOutput(value=int(self.value / self.multiple) * self.multiple)
+
+
+@invocation("round_float", title="Round Float", tags=["math", "round"], category="math", version="1.0.0")
+class RoundInvocation(BaseInvocation):
+ """Rounds a float to a specified number of decimal places."""
+
+ value: float = InputField(default=0, description="The float value")
+ decimals: int = InputField(default=0, description="The number of decimal places")
+
+ def invoke(self, context: InvocationContext) -> FloatOutput:
+ return FloatOutput(value=round(self.value, self.decimals))
+
+
+INTEGER_OPERATIONS = Literal[
+ "ADD",
+ "SUB",
+ "MUL",
+ "DIV",
+ "EXP",
+ "MOD",
+ "ABS",
+ "MIN",
+ "MAX",
+]
+
+
+INTEGER_OPERATIONS_LABELS = dict(
+ ADD="Add A+B",
+ SUB="Subtract A-B",
+ MUL="Multiply A*B",
+ DIV="Divide A/B",
+ EXP="Exponentiate A^B",
+ MOD="Modulus A%B",
+ ABS="Absolute Value of A",
+ MIN="Minimum(A,B)",
+ MAX="Maximum(A,B)",
+)
+
+
+@invocation(
+ "integer_math",
+ title="Integer Math",
+ tags=[
+ "math",
+ "integer",
+ "add",
+ "subtract",
+ "multiply",
+ "divide",
+ "modulus",
+ "power",
+ "absolute value",
+ "min",
+ "max",
+ ],
+ category="math",
+ version="1.0.0",
+)
+class IntegerMathInvocation(BaseInvocation):
+ """Performs integer math."""
+
+ operation: INTEGER_OPERATIONS = InputField(
+ default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS
+ )
+ a: int = InputField(default=0, description=FieldDescriptions.num_1)
+ b: int = InputField(default=0, description=FieldDescriptions.num_2)
+
+ @validator("b")
+ def no_unrepresentable_results(cls, v, values):
+ if values["operation"] == "DIV" and v == 0:
+ raise ValueError("Cannot divide by zero")
+ elif values["operation"] == "MOD" and v == 0:
+ raise ValueError("Cannot divide by zero")
+ elif values["operation"] == "EXP" and v < 0:
+ raise ValueError("Result of exponentiation is not an integer")
+ return v
+
+ def invoke(self, context: InvocationContext) -> IntegerOutput:
+ # Python doesn't support switch statements until 3.10, but InvokeAI supports back to 3.9
+ if self.operation == "ADD":
+ return IntegerOutput(value=self.a + self.b)
+ elif self.operation == "SUB":
+ return IntegerOutput(value=self.a - self.b)
+ elif self.operation == "MUL":
+ return IntegerOutput(value=self.a * self.b)
+ elif self.operation == "DIV":
+ return IntegerOutput(value=int(self.a / self.b))
+ elif self.operation == "EXP":
+ return IntegerOutput(value=self.a**self.b)
+ elif self.operation == "MOD":
+ return IntegerOutput(value=self.a % self.b)
+ elif self.operation == "ABS":
+ return IntegerOutput(value=abs(self.a))
+ elif self.operation == "MIN":
+ return IntegerOutput(value=min(self.a, self.b))
+ else: # self.operation == "MAX":
+ return IntegerOutput(value=max(self.a, self.b))
+
+
+FLOAT_OPERATIONS = Literal[
+ "ADD",
+ "SUB",
+ "MUL",
+ "DIV",
+ "EXP",
+ "ABS",
+ "SQRT",
+ "MIN",
+ "MAX",
+]
+
+
+FLOAT_OPERATIONS_LABELS = dict(
+ ADD="Add A+B",
+ SUB="Subtract A-B",
+ MUL="Multiply A*B",
+ DIV="Divide A/B",
+ EXP="Exponentiate A^B",
+ ABS="Absolute Value of A",
+ SQRT="Square Root of A",
+ MIN="Minimum(A,B)",
+ MAX="Maximum(A,B)",
+)
+
+
+@invocation(
+ "float_math",
+ title="Float Math",
+ tags=["math", "float", "add", "subtract", "multiply", "divide", "power", "root", "absolute value", "min", "max"],
+ category="math",
+ version="1.0.0",
+)
+class FloatMathInvocation(BaseInvocation):
+ """Performs floating point math."""
+
+ operation: FLOAT_OPERATIONS = InputField(
+ default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS
+ )
+ a: float = InputField(default=0, description=FieldDescriptions.num_1)
+ b: float = InputField(default=0, description=FieldDescriptions.num_2)
+
+ @validator("b")
+ def no_unrepresentable_results(cls, v, values):
+ if values["operation"] == "DIV" and v == 0:
+ raise ValueError("Cannot divide by zero")
+ elif values["operation"] == "EXP" and values["a"] == 0 and v < 0:
+ raise ValueError("Cannot raise zero to a negative power")
+ elif values["operation"] == "EXP" and type(values["a"] ** v) is complex:
+ raise ValueError("Root operation resulted in a complex number")
+ return v
+
+ def invoke(self, context: InvocationContext) -> FloatOutput:
+ # Python doesn't support switch statements until 3.10, but InvokeAI supports back to 3.9
+ if self.operation == "ADD":
+ return FloatOutput(value=self.a + self.b)
+ elif self.operation == "SUB":
+ return FloatOutput(value=self.a - self.b)
+ elif self.operation == "MUL":
+ return FloatOutput(value=self.a * self.b)
+ elif self.operation == "DIV":
+ return FloatOutput(value=self.a / self.b)
+ elif self.operation == "EXP":
+ return FloatOutput(value=self.a**self.b)
+ elif self.operation == "SQRT":
+ return FloatOutput(value=np.sqrt(self.a))
+ elif self.operation == "ABS":
+ return FloatOutput(value=abs(self.a))
+ elif self.operation == "MIN":
+ return FloatOutput(value=min(self.a, self.b))
+ else: # self.operation == "MAX":
+ return FloatOutput(value=max(self.a, self.b))
diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py
index d346a5f58f..213ce82a75 100644
--- a/invokeai/app/invocations/onnx.py
+++ b/invokeai/app/invocations/onnx.py
@@ -25,8 +25,8 @@ from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
- InputField,
Input,
+ InputField,
InvocationContext,
OutputField,
UIComponent,
diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py
index 9cfe447372..7c327a6657 100644
--- a/invokeai/app/invocations/param_easing.py
+++ b/invokeai/app/invocations/param_easing.py
@@ -3,7 +3,6 @@ from typing import Literal, Optional
import matplotlib.pyplot as plt
import numpy as np
-
import PIL.Image
from easing_functions import (
BackEaseIn,
diff --git a/invokeai/app/invocations/strings.py b/invokeai/app/invocations/strings.py
new file mode 100644
index 0000000000..3466206b37
--- /dev/null
+++ b/invokeai/app/invocations/strings.py
@@ -0,0 +1,139 @@
+# 2023 skunkworxdark (https://github.com/skunkworxdark)
+
+import re
+
+from .baseinvocation import (
+ BaseInvocation,
+ BaseInvocationOutput,
+ InputField,
+ InvocationContext,
+ OutputField,
+ UIComponent,
+ invocation,
+ invocation_output,
+)
+from .primitives import StringOutput
+
+
+@invocation_output("string_pos_neg_output")
+class StringPosNegOutput(BaseInvocationOutput):
+ """Base class for invocations that output a positive and negative string"""
+
+ positive_string: str = OutputField(description="Positive string")
+ negative_string: str = OutputField(description="Negative string")
+
+
+@invocation(
+ "string_split_neg",
+ title="String Split Negative",
+ tags=["string", "split", "negative"],
+ category="string",
+ version="1.0.0",
+)
+class StringSplitNegInvocation(BaseInvocation):
+    """Splits string into two strings, inside [] goes into negative string, everything else goes into positive string. Each [ and ] character is replaced with a space"""
+
+ string: str = InputField(default="", description="String to split", ui_component=UIComponent.Textarea)
+
+ def invoke(self, context: InvocationContext) -> StringPosNegOutput:
+ p_string = ""
+ n_string = ""
+ brackets_depth = 0
+ escaped = False
+
+ for char in self.string or "":
+ if char == "[" and not escaped:
+ n_string += " "
+ brackets_depth += 1
+ elif char == "]" and not escaped:
+ brackets_depth -= 1
+ char = " "
+ elif brackets_depth > 0:
+ n_string += char
+ else:
+ p_string += char
+
+ # keep track of the escape char but only if it isn't escaped already
+ if char == "\\" and not escaped:
+ escaped = True
+ else:
+ escaped = False
+
+ return StringPosNegOutput(positive_string=p_string, negative_string=n_string)
+
+
+@invocation_output("string_2_output")
+class String2Output(BaseInvocationOutput):
+ """Base class for invocations that output two strings"""
+
+ string_1: str = OutputField(description="string 1")
+ string_2: str = OutputField(description="string 2")
+
+
+@invocation("string_split", title="String Split", tags=["string", "split"], category="string", version="1.0.0")
+class StringSplitInvocation(BaseInvocation):
+    """Splits string into two strings, based on the first occurrence of the delimiter. The delimiter will be removed from the string"""
+
+ string: str = InputField(default="", description="String to split", ui_component=UIComponent.Textarea)
+ delimiter: str = InputField(
+        default="", description="Delimiter to split with. Blank will split on the first whitespace"
+ )
+
+ def invoke(self, context: InvocationContext) -> String2Output:
+ result = self.string.split(self.delimiter, 1)
+ if len(result) == 2:
+ part1, part2 = result
+ else:
+ part1 = result[0]
+ part2 = ""
+
+ return String2Output(string_1=part1, string_2=part2)
+
+
+@invocation("string_join", title="String Join", tags=["string", "join"], category="string", version="1.0.0")
+class StringJoinInvocation(BaseInvocation):
+ """Joins string left to string right"""
+
+ string_left: str = InputField(default="", description="String Left", ui_component=UIComponent.Textarea)
+ string_right: str = InputField(default="", description="String Right", ui_component=UIComponent.Textarea)
+
+ def invoke(self, context: InvocationContext) -> StringOutput:
+ return StringOutput(value=((self.string_left or "") + (self.string_right or "")))
+
+
+@invocation("string_join_three", title="String Join Three", tags=["string", "join"], category="string", version="1.0.0")
+class StringJoinThreeInvocation(BaseInvocation):
+ """Joins string left to string middle to string right"""
+
+ string_left: str = InputField(default="", description="String Left", ui_component=UIComponent.Textarea)
+ string_middle: str = InputField(default="", description="String Middle", ui_component=UIComponent.Textarea)
+ string_right: str = InputField(default="", description="String Right", ui_component=UIComponent.Textarea)
+
+ def invoke(self, context: InvocationContext) -> StringOutput:
+ return StringOutput(value=((self.string_left or "") + (self.string_middle or "") + (self.string_right or "")))
+
+
+@invocation(
+ "string_replace", title="String Replace", tags=["string", "replace", "regex"], category="string", version="1.0.0"
+)
+class StringReplaceInvocation(BaseInvocation):
+ """Replaces the search string with the replace string"""
+
+ string: str = InputField(default="", description="String to work on", ui_component=UIComponent.Textarea)
+ search_string: str = InputField(default="", description="String to search for", ui_component=UIComponent.Textarea)
+ replace_string: str = InputField(
+ default="", description="String to replace the search", ui_component=UIComponent.Textarea
+ )
+ use_regex: bool = InputField(
+ default=False, description="Use search string as a regex expression (non regex is case insensitive)"
+ )
+
+ def invoke(self, context: InvocationContext) -> StringOutput:
+ pattern = self.search_string or ""
+ new_string = self.string or ""
+ if len(pattern) > 0:
+ if not self.use_regex:
+            # Non-regex, so make case insensitive
+ pattern = "(?i)" + re.escape(pattern)
+ new_string = re.sub(pattern, (self.replace_string or ""), new_string)
+ return StringOutput(value=new_string)
diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py
index 7dca6d9f21..9dc2eed2c1 100644
--- a/invokeai/app/invocations/upscale.py
+++ b/invokeai/app/invocations/upscale.py
@@ -7,8 +7,8 @@ import numpy as np
from basicsr.archs.rrdbnet_arch import RRDBNet
from PIL import Image
from realesrgan import RealESRGANer
-from invokeai.app.invocations.primitives import ImageField, ImageOutput
+from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
diff --git a/invokeai/app/services/board_image_record_storage.py b/invokeai/app/services/board_image_record_storage.py
index 03badf9866..d35f9f2179 100644
--- a/invokeai/app/services/board_image_record_storage.py
+++ b/invokeai/app/services/board_image_record_storage.py
@@ -1,13 +1,10 @@
-from abc import ABC, abstractmethod
import sqlite3
import threading
+from abc import ABC, abstractmethod
from typing import Optional, cast
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
-from invokeai.app.services.models.image_record import (
- ImageRecord,
- deserialize_image_record,
-)
+from invokeai.app.services.models.image_record import ImageRecord, deserialize_image_record
class BoardImageRecordStorageBase(ABC):
diff --git a/invokeai/app/services/board_images.py b/invokeai/app/services/board_images.py
index 7c6995dfc8..788722ec37 100644
--- a/invokeai/app/services/board_images.py
+++ b/invokeai/app/services/board_images.py
@@ -1,12 +1,9 @@
from abc import ABC, abstractmethod
from logging import Logger
from typing import Optional
-from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
-from invokeai.app.services.board_record_storage import (
- BoardRecord,
- BoardRecordStorageBase,
-)
+from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
+from invokeai.app.services.board_record_storage import BoardRecord, BoardRecordStorageBase
from invokeai.app.services.image_record_storage import ImageRecordStorageBase
from invokeai.app.services.models.board_record import BoardDTO
from invokeai.app.services.urls import UrlServiceBase
diff --git a/invokeai/app/services/board_record_storage.py b/invokeai/app/services/board_record_storage.py
index 593bb3e7b9..f4876b6935 100644
--- a/invokeai/app/services/board_record_storage.py
+++ b/invokeai/app/services/board_record_storage.py
@@ -1,15 +1,13 @@
+import sqlite3
import threading
import uuid
from abc import ABC, abstractmethod
from typing import Optional, Union, cast
-import sqlite3
+from pydantic import BaseModel, Extra, Field
+
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
-from invokeai.app.services.models.board_record import (
- BoardRecord,
- deserialize_board_record,
-)
-from pydantic import BaseModel, Field, Extra
+from invokeai.app.services.models.board_record import BoardRecord, deserialize_board_record
class BoardChanges(BaseModel, extra=Extra.forbid):
diff --git a/invokeai/app/services/boards.py b/invokeai/app/services/boards.py
index 53d30b2e85..e7a516da65 100644
--- a/invokeai/app/services/boards.py
+++ b/invokeai/app/services/boards.py
@@ -1,17 +1,10 @@
from abc import ABC, abstractmethod
-
from logging import Logger
+
from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
from invokeai.app.services.board_images import board_record_to_dto
-
-from invokeai.app.services.board_record_storage import (
- BoardChanges,
- BoardRecordStorageBase,
-)
-from invokeai.app.services.image_record_storage import (
- ImageRecordStorageBase,
- OffsetPaginatedResults,
-)
+from invokeai.app.services.board_record_storage import BoardChanges, BoardRecordStorageBase
+from invokeai.app.services.image_record_storage import ImageRecordStorageBase, OffsetPaginatedResults
from invokeai.app.services.models.board_record import BoardDTO
from invokeai.app.services.urls import UrlServiceBase
diff --git a/invokeai/app/services/config/__init__.py b/invokeai/app/services/config/__init__.py
index 94eafac10a..a404f33638 100644
--- a/invokeai/app/services/config/__init__.py
+++ b/invokeai/app/services/config/__init__.py
@@ -2,8 +2,5 @@
Init file for InvokeAI configure package
"""
-from .invokeai_config import ( # noqa F401
- InvokeAIAppConfig,
- get_invokeai_config,
-)
from .base import PagingArgumentParser # noqa F401
+from .invokeai_config import InvokeAIAppConfig, get_invokeai_config # noqa F401
diff --git a/invokeai/app/services/config/base.py b/invokeai/app/services/config/base.py
index 33fd87b03a..02c6f15140 100644
--- a/invokeai/app/services/config/base.py
+++ b/invokeai/app/services/config/base.py
@@ -9,15 +9,17 @@ the command line.
"""
from __future__ import annotations
+
import argparse
import os
import pydoc
import sys
from argparse import ArgumentParser
-from omegaconf import OmegaConf, DictConfig, ListConfig
from pathlib import Path
+from typing import ClassVar, Dict, List, Literal, Union, get_args, get_origin, get_type_hints
+
+from omegaconf import DictConfig, ListConfig, OmegaConf
from pydantic import BaseSettings
-from typing import ClassVar, Dict, List, Literal, Union, get_origin, get_type_hints, get_args
class PagingArgumentParser(argparse.ArgumentParser):
diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/invokeai_config.py
index 7b687a28a1..237aec5205 100644
--- a/invokeai/app/services/config/invokeai_config.py
+++ b/invokeai/app/services/config/invokeai_config.py
@@ -172,9 +172,9 @@ from __future__ import annotations
import os
from pathlib import Path
-from typing import ClassVar, Dict, List, Literal, Union, get_type_hints, Optional
+from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hints
-from omegaconf import OmegaConf, DictConfig
+from omegaconf import DictConfig, OmegaConf
from pydantic import Field, parse_obj_as
from .base import InvokeAISettings
diff --git a/invokeai/app/services/default_graphs.py b/invokeai/app/services/default_graphs.py
index 5e1a594b91..2677060655 100644
--- a/invokeai/app/services/default_graphs.py
+++ b/invokeai/app/services/default_graphs.py
@@ -1,12 +1,11 @@
-from ..invocations.latent import LatentsToImageInvocation, DenoiseLatentsInvocation
-from ..invocations.image import ImageNSFWBlurInvocation
-from ..invocations.noise import NoiseInvocation
from ..invocations.compel import CompelInvocation
+from ..invocations.image import ImageNSFWBlurInvocation
+from ..invocations.latent import DenoiseLatentsInvocation, LatentsToImageInvocation
+from ..invocations.noise import NoiseInvocation
from ..invocations.primitives import IntegerInvocation
from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph
from .item_storage import ItemStorageABC
-
default_text_to_image_graph_id = "539b2af5-2b4d-4d8c-8071-e54a3255fc74"
diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events.py
index a266fe4f18..2bfe9b7c3d 100644
--- a/invokeai/app/services/events.py
+++ b/invokeai/app/services/events.py
@@ -1,14 +1,10 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from typing import Any, Optional
+
from invokeai.app.models.image import ProgressImage
+from invokeai.app.services.model_manager_service import BaseModelType, ModelInfo, ModelType, SubModelType
from invokeai.app.util.misc import get_timestamp
-from invokeai.app.services.model_manager_service import (
- BaseModelType,
- ModelType,
- SubModelType,
- ModelInfo,
-)
class EventServiceBase:
diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/graph.py
index c47d437956..90f0ec51d1 100644
--- a/invokeai/app/services/graph.py
+++ b/invokeai/app/services/graph.py
@@ -14,12 +14,12 @@ from ..invocations import * # noqa: F401 F403
from ..invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
- invocation,
Input,
InputField,
InvocationContext,
OutputField,
UIType,
+ invocation,
invocation_output,
)
diff --git a/invokeai/app/services/image_record_storage.py b/invokeai/app/services/image_record_storage.py
index 8f1b25b84a..b9982fcfdc 100644
--- a/invokeai/app/services/image_record_storage.py
+++ b/invokeai/app/services/image_record_storage.py
@@ -9,11 +9,7 @@ from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
from invokeai.app.models.image import ImageCategory, ResourceOrigin
-from invokeai.app.services.models.image_record import (
- ImageRecord,
- ImageRecordChanges,
- deserialize_image_record,
-)
+from invokeai.app.services.models.image_record import ImageRecord, ImageRecordChanges, deserialize_image_record
T = TypeVar("T", bound=BaseModel)
diff --git a/invokeai/app/services/images.py b/invokeai/app/services/images.py
index 4d8e3d6d03..2b0a3d62a5 100644
--- a/invokeai/app/services/images.py
+++ b/invokeai/app/services/images.py
@@ -26,12 +26,7 @@ from invokeai.app.services.image_record_storage import (
OffsetPaginatedResults,
)
from invokeai.app.services.item_storage import ItemStorageABC
-from invokeai.app.services.models.image_record import (
- ImageDTO,
- ImageRecord,
- ImageRecordChanges,
- image_record_to_dto,
-)
+from invokeai.app.services.models.image_record import ImageDTO, ImageRecord, ImageRecordChanges, image_record_to_dto
from invokeai.app.services.resource_name import NameServiceBase
from invokeai.app.services.urls import UrlServiceBase
from invokeai.app.util.metadata import get_metadata_graph_from_raw_session
diff --git a/invokeai/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue.py
index 963d500aa8..2fe5f60826 100644
--- a/invokeai/app/services/invocation_queue.py
+++ b/invokeai/app/services/invocation_queue.py
@@ -3,9 +3,9 @@
import time
from abc import ABC, abstractmethod
from queue import Queue
+from typing import Optional
from pydantic import BaseModel, Field
-from typing import Optional
class InvocationQueueItem(BaseModel):
diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py
index d717087958..a4ec3573f5 100644
--- a/invokeai/app/services/invocation_services.py
+++ b/invokeai/app/services/invocation_services.py
@@ -1,21 +1,23 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
from __future__ import annotations
+
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from logging import Logger
+
from invokeai.app.services.board_images import BoardImagesServiceABC
from invokeai.app.services.boards import BoardServiceABC
- from invokeai.app.services.images import ImageServiceABC
- from invokeai.app.services.invocation_stats import InvocationStatsServiceBase
- from invokeai.app.services.model_manager_service import ModelManagerServiceBase
- from invokeai.app.services.events import EventServiceBase
- from invokeai.app.services.latent_storage import LatentsStorageBase
- from invokeai.app.services.invocation_queue import InvocationQueueABC
- from invokeai.app.services.item_storage import ItemStorageABC
from invokeai.app.services.config import InvokeAIAppConfig
+ from invokeai.app.services.events import EventServiceBase
from invokeai.app.services.graph import GraphExecutionState, LibraryGraph
+ from invokeai.app.services.images import ImageServiceABC
+ from invokeai.app.services.invocation_queue import InvocationQueueABC
+ from invokeai.app.services.invocation_stats import InvocationStatsServiceBase
from invokeai.app.services.invoker import InvocationProcessorABC
+ from invokeai.app.services.item_storage import ItemStorageABC
+ from invokeai.app.services.latent_storage import LatentsStorageBase
+ from invokeai.app.services.model_manager_service import ModelManagerServiceBase
class InvocationServices:
diff --git a/invokeai/app/services/invocation_stats.py b/invokeai/app/services/invocation_stats.py
index b42d128b51..33932f73aa 100644
--- a/invokeai/app/services/invocation_stats.py
+++ b/invokeai/app/services/invocation_stats.py
@@ -28,22 +28,22 @@ The abstract base class for this class is InvocationStatsServiceBase. An impleme
writes to the system log is stored in InvocationServices.performance_statistics.
"""
-import psutil
import time
from abc import ABC, abstractmethod
from contextlib import AbstractContextManager
from dataclasses import dataclass, field
from typing import Dict
+import psutil
import torch
import invokeai.backend.util.logging as logger
+from invokeai.backend.model_management.model_cache import CacheStats
from ..invocations.baseinvocation import BaseInvocation
from .graph import GraphExecutionState
from .item_storage import ItemStorageABC
from .model_manager_service import ModelManagerService
-from invokeai.backend.model_management.model_cache import CacheStats
# size of GIG in bytes
GIG = 1073741824
diff --git a/invokeai/app/services/latent_storage.py b/invokeai/app/services/latent_storage.py
index 7c22ea5ba8..f0b1dc9fe7 100644
--- a/invokeai/app/services/latent_storage.py
+++ b/invokeai/app/services/latent_storage.py
@@ -3,7 +3,7 @@
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
-from typing import Dict, Union, Optional
+from typing import Dict, Optional, Union
import torch
diff --git a/invokeai/app/services/model_manager_service.py b/invokeai/app/services/model_manager_service.py
index 11ebab7938..a7745a9253 100644
--- a/invokeai/app/services/model_manager_service.py
+++ b/invokeai/app/services/model_manager_service.py
@@ -5,27 +5,28 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from logging import Logger
from pathlib import Path
-from pydantic import Field
-from typing import Literal, Optional, Union, Callable, List, Tuple, TYPE_CHECKING
from types import ModuleType
-
-from invokeai.backend.model_management import (
- ModelManager,
- BaseModelType,
- ModelType,
- SubModelType,
- ModelInfo,
- AddModelResult,
- SchedulerPredictionType,
- ModelMerger,
- MergeInterpolationMethod,
- ModelNotFoundException,
-)
-from invokeai.backend.model_management.model_search import FindModels
-from invokeai.backend.model_management.model_cache import CacheStats
+from typing import TYPE_CHECKING, Callable, List, Literal, Optional, Tuple, Union
import torch
+from pydantic import Field
+
from invokeai.app.models.exceptions import CanceledException
+from invokeai.backend.model_management import (
+ AddModelResult,
+ BaseModelType,
+ MergeInterpolationMethod,
+ ModelInfo,
+ ModelManager,
+ ModelMerger,
+ ModelNotFoundException,
+ ModelType,
+ SchedulerPredictionType,
+ SubModelType,
+)
+from invokeai.backend.model_management.model_cache import CacheStats
+from invokeai.backend.model_management.model_search import FindModels
+
from ...backend.util import choose_precision, choose_torch_device
from .config import InvokeAIAppConfig
diff --git a/invokeai/app/services/models/board_record.py b/invokeai/app/services/models/board_record.py
index 53fa299faf..4b93d0ea23 100644
--- a/invokeai/app/services/models/board_record.py
+++ b/invokeai/app/services/models/board_record.py
@@ -1,6 +1,8 @@
-from typing import Optional, Union
from datetime import datetime
+from typing import Optional, Union
+
from pydantic import Field
+
from invokeai.app.util.misc import get_iso_timestamp
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
diff --git a/invokeai/app/services/resource_name.py b/invokeai/app/services/resource_name.py
index dd5a76cfc0..c89c06edc3 100644
--- a/invokeai/app/services/resource_name.py
+++ b/invokeai/app/services/resource_name.py
@@ -1,6 +1,6 @@
+import uuid
from abc import ABC, abstractmethod
from enum import Enum, EnumMeta
-import uuid
class ResourceType(str, Enum, metaclass=EnumMeta):
diff --git a/invokeai/app/util/controlnet_utils.py b/invokeai/app/util/controlnet_utils.py
index 1578598687..8516fa8917 100644
--- a/invokeai/app/util/controlnet_utils.py
+++ b/invokeai/app/util/controlnet_utils.py
@@ -1,12 +1,12 @@
from typing import Union
-import torch
-import numpy as np
-import cv2
-from PIL import Image
-from diffusers.utils import PIL_INTERPOLATION
-from einops import rearrange
+import cv2
+import numpy as np
+import torch
from controlnet_aux.util import HWC3
+from diffusers.utils import PIL_INTERPOLATION
+from einops import rearrange
+from PIL import Image
###################################################################
# Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet
diff --git a/invokeai/app/util/misc.py b/invokeai/app/util/misc.py
index 8702d86c67..b42b2246b8 100644
--- a/invokeai/app/util/misc.py
+++ b/invokeai/app/util/misc.py
@@ -1,4 +1,5 @@
import datetime
+
import numpy as np
diff --git a/invokeai/app/util/model_exclude_null.py b/invokeai/app/util/model_exclude_null.py
index d864b8fab8..b75f127ec7 100644
--- a/invokeai/app/util/model_exclude_null.py
+++ b/invokeai/app/util/model_exclude_null.py
@@ -1,6 +1,6 @@
from typing import Any
-from pydantic import BaseModel
+from pydantic import BaseModel
"""
We want to exclude null values from objects that make their way to the client.
diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py
index f6cccfb4b8..26de809d1c 100644
--- a/invokeai/app/util/step_callback.py
+++ b/invokeai/app/util/step_callback.py
@@ -1,11 +1,13 @@
import torch
from PIL import Image
+
from invokeai.app.models.exceptions import CanceledException
from invokeai.app.models.image import ProgressImage
-from ..invocations.baseinvocation import InvocationContext
-from ...backend.util.util import image_to_dataURL
-from ...backend.stable_diffusion import PipelineIntermediateState
+
from ...backend.model_management.models import BaseModelType
+from ...backend.stable_diffusion import PipelineIntermediateState
+from ...backend.util.util import image_to_dataURL
+from ..invocations.baseinvocation import InvocationContext
def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix=None):
diff --git a/invokeai/app/util/thumbnails.py b/invokeai/app/util/thumbnails.py
index 42a6fe9962..ad722f197e 100644
--- a/invokeai/app/util/thumbnails.py
+++ b/invokeai/app/util/thumbnails.py
@@ -1,4 +1,5 @@
import os
+
from PIL import Image
diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py
index 2e77d12eca..ae9a12edbe 100644
--- a/invokeai/backend/__init__.py
+++ b/invokeai/backend/__init__.py
@@ -1,5 +1,5 @@
"""
Initialization file for invokeai.backend
"""
-from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo # noqa: F401
+from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType # noqa: F401
from .model_management.models import SilenceWarnings # noqa: F401
diff --git a/invokeai/backend/image_util/invisible_watermark.py b/invokeai/backend/image_util/invisible_watermark.py
index 4605daea43..3e8604f9c3 100644
--- a/invokeai/backend/image_util/invisible_watermark.py
+++ b/invokeai/backend/image_util/invisible_watermark.py
@@ -3,12 +3,13 @@ This module defines a singleton object, "invisible_watermark" that
wraps the invisible watermark model. It respects the global "invisible_watermark"
configuration variable, that allows the watermarking to be supressed.
"""
-import numpy as np
import cv2
-from PIL import Image
+import numpy as np
from imwatermark import WatermarkEncoder
-from invokeai.app.services.config import InvokeAIAppConfig
+from PIL import Image
+
import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig.get_config()
diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py
index 98055f60c8..5514e3d33a 100644
--- a/invokeai/backend/image_util/patchmatch.py
+++ b/invokeai/backend/image_util/patchmatch.py
@@ -5,6 +5,7 @@ wraps the actual patchmatch object. It respects the global
be suppressed or deferred
"""
import numpy as np
+
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
diff --git a/invokeai/backend/image_util/safety_checker.py b/invokeai/backend/image_util/safety_checker.py
index 483e563b82..fd1f05f10e 100644
--- a/invokeai/backend/image_util/safety_checker.py
+++ b/invokeai/backend/image_util/safety_checker.py
@@ -5,10 +5,11 @@ configuration variable, that allows the checker to be supressed.
"""
import numpy as np
from PIL import Image
-from invokeai.backend import SilenceWarnings
-from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.util.devices import choose_torch_device
+
import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend import SilenceWarnings
+from invokeai.backend.util.devices import choose_torch_device
config = InvokeAIAppConfig.get_config()
diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py
index 2104c3a4c7..6ee2aa34b7 100644
--- a/invokeai/backend/install/check_root.py
+++ b/invokeai/backend/install/check_root.py
@@ -2,9 +2,8 @@
Check that the invokeai_root is correctly configured and exit if not.
"""
import sys
-from invokeai.app.services.config import (
- InvokeAIAppConfig,
-)
+
+from invokeai.app.services.config import InvokeAIAppConfig
def check_invokeai_root(config: InvokeAIAppConfig):
diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index c5dce13050..0b3f50e3fc 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -6,68 +6,56 @@
#
# Coauthor: Kevin Turner http://github.com/keturn
#
-import sys
import argparse
import io
import os
-import psutil
import shutil
+import sys
import textwrap
-import torch
import traceback
-import yaml
import warnings
from argparse import Namespace
from enum import Enum
from pathlib import Path
from shutil import get_terminal_size
-from typing import get_type_hints, get_args, Any
+from typing import Any, get_args, get_type_hints
from urllib import request
import npyscreen
-import transformers
import omegaconf
+import psutil
+import torch
+import transformers
+import yaml
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
+from pydantic.error_wrappers import ValidationError
from tqdm import tqdm
-from transformers import (
- CLIPTextModel,
- CLIPTextConfig,
- CLIPTokenizer,
- AutoFeatureExtractor,
- BertTokenizerFast,
-)
-import invokeai.configs as configs
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
-from invokeai.app.services.config import (
- InvokeAIAppConfig,
-)
+import invokeai.configs as configs
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.legacy_arg_parsing import legacy_parser
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
# TO DO - Move all the frontend code into invokeai.frontend.install
from invokeai.frontend.install.widgets import (
- SingleSelectColumnsSimple,
- MultiSelectColumns,
- CenteredButtonPress,
- FileBox,
- set_min_terminal_size,
- CyclingForm,
MIN_COLS,
MIN_LINES,
+ CenteredButtonPress,
+ CyclingForm,
+ FileBox,
+ MultiSelectColumns,
+ SingleSelectColumnsSimple,
WindowTooSmallException,
+ set_min_terminal_size,
)
-from invokeai.backend.install.legacy_arg_parsing import legacy_parser
-from invokeai.backend.install.model_install_backend import (
- hf_download_from_pretrained,
- InstallSelections,
- ModelInstall,
-)
-from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
-from pydantic.error_wrappers import ValidationError
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py
index da533ab187..ea5bee8058 100644
--- a/invokeai/backend/install/migrate_to_3.py
+++ b/invokeai/backend/install/migrate_to_3.py
@@ -3,33 +3,26 @@ Migrate the models directory and models.yaml file from an existing
InvokeAI 2.3 installation to 3.0.0.
"""
-import os
import argparse
+import os
import shutil
-import yaml
-
-import transformers
-import diffusers
import warnings
-
from dataclasses import dataclass
from pathlib import Path
-from omegaconf import OmegaConf, DictConfig
from typing import Union
-from diffusers import StableDiffusionPipeline, AutoencoderKL
+import diffusers
+import transformers
+import yaml
+from diffusers import AutoencoderKL, StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from transformers import (
- CLIPTextModel,
- CLIPTokenizer,
- AutoFeatureExtractor,
- BertTokenizerFast,
-)
+from omegaconf import DictConfig, OmegaConf
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import ModelManager
-from invokeai.backend.model_management.model_probe import ModelProbe, ModelType, BaseModelType, ModelProbeInfo
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelProbe, ModelProbeInfo, ModelType
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index e41783ab09..6c9eb548ab 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -7,23 +7,23 @@ import warnings
from dataclasses import dataclass, field
from pathlib import Path
from tempfile import TemporaryDirectory
-from typing import Optional, List, Dict, Callable, Union, Set
+from typing import Callable, Dict, List, Optional, Set, Union
import requests
+import torch
from diffusers import DiffusionPipeline
from diffusers import logging as dlogging
-import torch
-from huggingface_hub import hf_hub_url, HfFolder, HfApi
+from huggingface_hub import HfApi, HfFolder, hf_hub_url
from omegaconf import OmegaConf
from tqdm import tqdm
import invokeai.configs as configs
-
from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
-from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo
+from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
+from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType
from invokeai.backend.util import download_with_resume
-from invokeai.backend.util.devices import torch_dtype, choose_torch_device
+from invokeai.backend.util.devices import choose_torch_device, torch_dtype
+
from ..util.logging import InvokeAILogger
warnings.filterwarnings("ignore")
diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py
index 6376585fe5..569428a9e7 100644
--- a/invokeai/backend/model_management/__init__.py
+++ b/invokeai/backend/model_management/__init__.py
@@ -1,15 +1,19 @@
"""
Initialization file for invokeai.backend.model_management
"""
-from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType # noqa: F401
-from .model_cache import ModelCache # noqa: F401
+# This import must be first
+from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType # noqa: F401 isort: split
+
from .lora import ModelPatcher, ONNXModelPatcher # noqa: F401
+from .model_cache import ModelCache # noqa: F401
from .models import ( # noqa: F401
BaseModelType,
- ModelType,
- SubModelType,
- ModelVariantType,
- ModelNotFoundException,
DuplicateModelException,
+ ModelNotFoundException,
+ ModelType,
+ ModelVariantType,
+ SubModelType,
)
-from .model_merge import ModelMerger, MergeInterpolationMethod # noqa: F401
+
+# This import must be last
+from .model_merge import ModelMerger, MergeInterpolationMethod # noqa: F401 isort: split
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 8118e28abb..69d32a49c7 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -25,12 +25,7 @@ from typing import Optional, Union
import requests
import torch
-from diffusers.models import (
- AutoencoderKL,
- ControlNetModel,
- PriorTransformer,
- UNet2DConditionModel,
-)
+from diffusers.models import AutoencoderKL, ControlNetModel, PriorTransformer, UNet2DConditionModel
from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
@@ -64,6 +59,7 @@ from transformers import (
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util.logging import InvokeAILogger
+
from .models import BaseModelType, ModelVariantType
try:
@@ -1203,8 +1199,8 @@ def download_from_original_stable_diffusion_ckpt(
StableDiffusionControlNetPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionPipeline,
- StableDiffusionXLPipeline,
StableDiffusionXLImg2ImgPipeline,
+ StableDiffusionXLPipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
)
diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py
index d0d8d4226c..bb44455c88 100644
--- a/invokeai/backend/model_management/lora.py
+++ b/invokeai/backend/model_management/lora.py
@@ -2,8 +2,8 @@ from __future__ import annotations
import copy
from contextlib import contextmanager
-from typing import Optional, Dict, Tuple, Any, Union, List
from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
@@ -14,7 +14,6 @@ from transformers import CLIPTextModel, CLIPTokenizer
from .models.lora import LoRAModel
-
"""
loras = [
(lora_model1, 0.7),
@@ -307,9 +306,10 @@ class TextualInversionManager(BaseTextualInversionManager):
class ONNXModelPatcher:
- from .models.base import IAIOnnxRuntimeModel
from diffusers import OnnxRuntimeModel
+ from .models.base import IAIOnnxRuntimeModel
+
@classmethod
@contextmanager
def apply_lora_unet(
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 6f3e5bd6a5..6d0f36ad8c 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -17,18 +17,19 @@ context. Use like this:
"""
import gc
+import hashlib
import os
import sys
-import hashlib
from contextlib import suppress
from dataclasses import dataclass, field
from pathlib import Path
-from typing import Dict, Union, types, Optional, Type, Any
+from typing import Any, Dict, Optional, Type, Union, types
import torch
import invokeai.backend.util.logging as logger
-from .models import BaseModelType, ModelType, SubModelType, ModelBase
+
+from .models import BaseModelType, ModelBase, ModelType, SubModelType
# Maximum size of the cache, in gigs
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index d87bc03fb7..e39ed6bf61 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -234,8 +234,8 @@ import textwrap
import types
from dataclasses import dataclass
from pathlib import Path
-from shutil import rmtree, move
-from typing import Optional, List, Literal, Tuple, Union, Dict, Set, Callable
+from shutil import move, rmtree
+from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union
import torch
import yaml
@@ -246,20 +246,21 @@ from pydantic import BaseModel, Field
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util import CUDA_DEVICE, Chdir
+
from .model_cache import ModelCache, ModelLocker
from .model_search import ModelSearch
from .models import (
- BaseModelType,
- ModelType,
- SubModelType,
- ModelError,
- SchedulerPredictionType,
MODEL_CLASSES,
- ModelConfigBase,
- ModelNotFoundException,
- InvalidModelException,
+ BaseModelType,
DuplicateModelException,
+ InvalidModelException,
ModelBase,
+ ModelConfigBase,
+ ModelError,
+ ModelNotFoundException,
+ ModelType,
+ SchedulerPredictionType,
+ SubModelType,
)
# We are only starting to number the config file with release 3.
diff --git a/invokeai/backend/model_management/model_merge.py b/invokeai/backend/model_management/model_merge.py
index a34d9b0e3e..59201d64d9 100644
--- a/invokeai/backend/model_management/model_merge.py
+++ b/invokeai/backend/model_management/model_merge.py
@@ -9,13 +9,14 @@ Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
import warnings
from enum import Enum
from pathlib import Path
+from typing import List, Optional, Union
+
from diffusers import DiffusionPipeline
from diffusers import logging as dlogging
-from typing import List, Union, Optional
import invokeai.backend.util.logging as logger
-from ...backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
+from ...backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
class MergeInterpolationMethod(str, Enum):
diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index 6f66918a01..bf6f882428 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -1,20 +1,20 @@
import json
from dataclasses import dataclass
from pathlib import Path
-from typing import Callable, Literal, Union, Dict, Optional
+from typing import Callable, Dict, Literal, Optional, Union
import safetensors.torch
import torch
-from diffusers import ModelMixin, ConfigMixin
+from diffusers import ConfigMixin, ModelMixin
from picklescan.scanner import scan_file_path
from .models import (
BaseModelType,
+ InvalidModelException,
ModelType,
ModelVariantType,
SchedulerPredictionType,
SilenceWarnings,
- InvalidModelException,
)
from .models.base import read_checkpoint_meta
from .util import lora_token_vector_length
diff --git a/invokeai/backend/model_management/model_search.py b/invokeai/backend/model_management/model_search.py
index 0a98091f4a..f4dd8b7739 100644
--- a/invokeai/backend/model_management/model_search.py
+++ b/invokeai/backend/model_management/model_search.py
@@ -5,8 +5,8 @@ Abstract base class for recursive directory search for models.
import os
from abc import ABC, abstractmethod
-from typing import List, Set, types
from pathlib import Path
+from typing import List, Set, types
import invokeai.backend.util.logging as logger
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 2de206257b..695e9b0ec0 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -1,29 +1,30 @@
import inspect
from enum import Enum
-from pydantic import BaseModel
from typing import Literal, get_origin
+
+from pydantic import BaseModel
+
from .base import ( # noqa: F401
BaseModelType,
- ModelType,
- SubModelType,
+ DuplicateModelException,
+ InvalidModelException,
ModelBase,
ModelConfigBase,
+ ModelError,
+ ModelNotFoundException,
+ ModelType,
ModelVariantType,
SchedulerPredictionType,
- ModelError,
SilenceWarnings,
- ModelNotFoundException,
- InvalidModelException,
- DuplicateModelException,
+ SubModelType,
)
-from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
-from .sdxl import StableDiffusionXLModel
-from .vae import VaeModel
-from .lora import LoRAModel
from .controlnet import ControlNetModel # TODO:
-from .textual_inversion import TextualInversionModel
-
+from .lora import LoRAModel
+from .sdxl import StableDiffusionXLModel
+from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model
+from .textual_inversion import TextualInversionModel
+from .vae import VaeModel
MODEL_CLASSES = {
BaseModelType.StableDiffusion1: {
diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index ed1c2c6098..d704c56103 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -1,29 +1,25 @@
+import inspect
import json
import os
import sys
import typing
-import inspect
import warnings
from abc import ABCMeta, abstractmethod
from contextlib import suppress
from enum import Enum
from pathlib import Path
-from picklescan.scanner import scan_file_path
+from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union
-import torch
import numpy as np
import onnx
import safetensors.torch
-from diffusers import DiffusionPipeline, ConfigMixin
-from onnx import numpy_helper
-from onnxruntime import (
- InferenceSession,
- SessionOptions,
- get_available_providers,
-)
-from pydantic import BaseModel, Field
-from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
+import torch
+from diffusers import ConfigMixin, DiffusionPipeline
from diffusers import logging as diffusers_logging
+from onnx import numpy_helper
+from onnxruntime import InferenceSession, SessionOptions, get_available_providers
+from picklescan.scanner import scan_file_path
+from pydantic import BaseModel, Field
from transformers import logging as transformers_logging
diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py
index ebc01399b5..359df91a82 100644
--- a/invokeai/backend/model_management/models/controlnet.py
+++ b/invokeai/backend/model_management/models/controlnet.py
@@ -1,23 +1,26 @@
import os
-import torch
from enum import Enum
from pathlib import Path
-from typing import Optional, Literal
+from typing import Literal, Optional
+
+import torch
+
+import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+
from .base import (
+ BaseModelType,
+ EmptyConfigLoader,
+ InvalidModelException,
ModelBase,
ModelConfigBase,
- BaseModelType,
+ ModelNotFoundException,
ModelType,
SubModelType,
- EmptyConfigLoader,
- calc_model_size_by_fs,
calc_model_size_by_data,
+ calc_model_size_by_fs,
classproperty,
- InvalidModelException,
- ModelNotFoundException,
)
-from invokeai.app.services.config import InvokeAIAppConfig
-import invokeai.backend.util.logging as logger
class ControlNetModelFormat(str, Enum):
diff --git a/invokeai/backend/model_management/models/sdxl.py b/invokeai/backend/model_management/models/sdxl.py
index 5bbe05be98..41586e35b9 100644
--- a/invokeai/backend/model_management/models/sdxl.py
+++ b/invokeai/backend/model_management/models/sdxl.py
@@ -1,19 +1,21 @@
-import os
import json
+import os
from enum import Enum
-from pydantic import Field
from typing import Literal, Optional
+
+from omegaconf import OmegaConf
+from pydantic import Field
+
from .base import (
- ModelConfigBase,
BaseModelType,
+ DiffusersModel,
+ InvalidModelException,
+ ModelConfigBase,
ModelType,
ModelVariantType,
- DiffusersModel,
- read_checkpoint_meta,
classproperty,
- InvalidModelException,
+ read_checkpoint_meta,
)
-from omegaconf import OmegaConf
class StableDiffusionXLModelFormat(str, Enum):
diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py
index cc34f14b9c..ffce42d9e9 100644
--- a/invokeai/backend/model_management/models/stable_diffusion.py
+++ b/invokeai/backend/model_management/models/stable_diffusion.py
@@ -1,26 +1,29 @@
-import os
import json
+import os
from enum import Enum
-from pydantic import Field
from pathlib import Path
from typing import Literal, Optional, Union
+
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
-from .base import (
- ModelConfigBase,
- BaseModelType,
- ModelType,
- ModelVariantType,
- DiffusersModel,
- SilenceWarnings,
- read_checkpoint_meta,
- classproperty,
- InvalidModelException,
- ModelNotFoundException,
-)
-from .sdxl import StableDiffusionXLModel
+from omegaconf import OmegaConf
+from pydantic import Field
+
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
-from omegaconf import OmegaConf
+
+from .base import (
+ BaseModelType,
+ DiffusersModel,
+ InvalidModelException,
+ ModelConfigBase,
+ ModelNotFoundException,
+ ModelType,
+ ModelVariantType,
+ SilenceWarnings,
+ classproperty,
+ read_checkpoint_meta,
+)
+from .sdxl import StableDiffusionXLModel
class StableDiffusion1ModelFormat(str, Enum):
@@ -272,8 +275,8 @@ def _convert_ckpt_and_cache(
return output_path
# to avoid circular import errors
- from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
from ...util.devices import choose_torch_device, torch_dtype
+ from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
model_base_to_model_type = {
BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",
diff --git a/invokeai/backend/model_management/models/stable_diffusion_onnx.py b/invokeai/backend/model_management/models/stable_diffusion_onnx.py
index 2780ba4728..2d0dd22c43 100644
--- a/invokeai/backend/model_management/models/stable_diffusion_onnx.py
+++ b/invokeai/backend/model_management/models/stable_diffusion_onnx.py
@@ -2,15 +2,16 @@ from enum import Enum
from typing import Literal
from diffusers import OnnxRuntimeModel
+
from .base import (
- ModelConfigBase,
BaseModelType,
+ DiffusersModel,
+ IAIOnnxRuntimeModel,
+ ModelConfigBase,
ModelType,
ModelVariantType,
- DiffusersModel,
SchedulerPredictionType,
classproperty,
- IAIOnnxRuntimeModel,
)
diff --git a/invokeai/backend/model_management/models/textual_inversion.py b/invokeai/backend/model_management/models/textual_inversion.py
index a949a15be1..b59e635045 100644
--- a/invokeai/backend/model_management/models/textual_inversion.py
+++ b/invokeai/backend/model_management/models/textual_inversion.py
@@ -1,19 +1,20 @@
import os
-import torch
from typing import Optional
-from .base import (
- ModelBase,
- ModelConfigBase,
- BaseModelType,
- ModelType,
- SubModelType,
- classproperty,
- ModelNotFoundException,
- InvalidModelException,
-)
+
+import torch
# TODO: naming
from ..lora import TextualInversionModel as TextualInversionModelRaw
+from .base import (
+ BaseModelType,
+ InvalidModelException,
+ ModelBase,
+ ModelConfigBase,
+ ModelNotFoundException,
+ ModelType,
+ SubModelType,
+ classproperty,
+)
class TextualInversionModel(ModelBase):
diff --git a/invokeai/backend/model_management/models/vae.py b/invokeai/backend/model_management/models/vae.py
index f5dc11b27b..637160c69b 100644
--- a/invokeai/backend/model_management/models/vae.py
+++ b/invokeai/backend/model_management/models/vae.py
@@ -8,19 +8,20 @@ import torch
from omegaconf import OmegaConf
from invokeai.app.services.config import InvokeAIAppConfig
+
from .base import (
+ BaseModelType,
+ EmptyConfigLoader,
+ InvalidModelException,
ModelBase,
ModelConfigBase,
- BaseModelType,
- ModelType,
- SubModelType,
- ModelVariantType,
- EmptyConfigLoader,
- calc_model_size_by_fs,
- calc_model_size_by_data,
- classproperty,
- InvalidModelException,
ModelNotFoundException,
+ ModelType,
+ ModelVariantType,
+ SubModelType,
+ calc_model_size_by_data,
+ calc_model_size_by_fs,
+ classproperty,
)
diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py
index a958750802..9e2b49bf09 100644
--- a/invokeai/backend/stable_diffusion/__init__.py
+++ b/invokeai/backend/stable_diffusion/__init__.py
@@ -9,7 +9,7 @@ from .diffusers_pipeline import ( # noqa: F401
from .diffusion import InvokeAIDiffuserComponent # noqa: F401
from .diffusion.cross_attention_map_saving import AttentionMapSaver # noqa: F401
from .diffusion.shared_invokeai_diffusion import ( # noqa: F401
- PostprocessingSettings,
BasicConditioningInfo,
+ PostprocessingSettings,
SDXLConditioningInfo,
)
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index d88313f455..fc4442c2d6 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -5,20 +5,16 @@ import inspect
from dataclasses import dataclass, field
from typing import Any, Callable, List, Optional, Union
-import PIL.Image
import einops
+import PIL.Image
import psutil
import torch
import torchvision.transforms as T
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.controlnet import ControlNetModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
- StableDiffusionPipeline,
-)
-from diffusers.pipelines.stable_diffusion.safety_checker import (
- StableDiffusionSafetyChecker,
-)
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
from diffusers.utils.import_utils import is_xformers_available
@@ -27,13 +23,9 @@ from pydantic import Field
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from invokeai.app.services.config import InvokeAIAppConfig
-from .diffusion import (
- AttentionMapSaver,
- InvokeAIDiffuserComponent,
- PostprocessingSettings,
- BasicConditioningInfo,
-)
-from ..util import normalize_device, auto_detect_slice_size
+
+from ..util import auto_detect_slice_size, normalize_device
+from .diffusion import AttentionMapSaver, BasicConditioningInfo, InvokeAIDiffuserComponent, PostprocessingSettings
@dataclass
diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py
index 2bcc595889..137e32cff5 100644
--- a/invokeai/backend/stable_diffusion/diffusion/__init__.py
+++ b/invokeai/backend/stable_diffusion/diffusion/__init__.py
@@ -4,8 +4,8 @@ Initialization file for invokeai.models.diffusion
from .cross_attention_control import InvokeAICrossAttentionMixin # noqa: F401
from .cross_attention_map_saving import AttentionMapSaver # noqa: F401
from .shared_invokeai_diffusion import ( # noqa: F401
+ BasicConditioningInfo,
InvokeAIDiffuserComponent,
PostprocessingSettings,
- BasicConditioningInfo,
SDXLConditioningInfo,
)
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index 35d4800859..03b438525f 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -11,16 +11,12 @@ import diffusers
import psutil
import torch
from compel.cross_attention_control import Arguments
+from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
-from diffusers.models.attention_processor import (
- Attention,
- AttnProcessor,
- SlicedAttnProcessor,
-)
from torch import nn
import invokeai.backend.util.logging as logger
+
from ...util import torch_dtype
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index f05adafca2..6fe53fd002 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -1,8 +1,8 @@
from __future__ import annotations
+import math
from contextlib import contextmanager
from dataclasses import dataclass
-import math
from typing import Any, Callable, Optional, Union
import torch
diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
index 2f62f8c477..16213acb64 100644
--- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py
+++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
@@ -1,18 +1,18 @@
from diffusers import (
DDIMScheduler,
+ DDPMScheduler,
+ DEISMultistepScheduler,
DPMSolverMultistepScheduler,
- KDPM2DiscreteScheduler,
- KDPM2AncestralDiscreteScheduler,
- EulerDiscreteScheduler,
+ DPMSolverSDEScheduler,
+ DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
+ EulerDiscreteScheduler,
HeunDiscreteScheduler,
+ KDPM2AncestralDiscreteScheduler,
+ KDPM2DiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UniPCMultistepScheduler,
- DPMSolverSinglestepScheduler,
- DEISMultistepScheduler,
- DDPMScheduler,
- DPMSolverSDEScheduler,
)
SCHEDULER_MAP = dict(
diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index d92aa80b38..153bd0fcc4 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -24,13 +24,8 @@ import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
-from accelerate.utils import set_seed, ProjectConfiguration
-from diffusers import (
- AutoencoderKL,
- DDPMScheduler,
- StableDiffusionPipeline,
- UNet2DConditionModel,
-)
+from accelerate.utils import ProjectConfiguration, set_seed
+from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py
index a8d53f54a4..601aab00cb 100644
--- a/invokeai/backend/util/__init__.py
+++ b/invokeai/backend/util/__init__.py
@@ -1,6 +1,7 @@
"""
Initialization file for invokeai.backend.util
"""
+from .attention import auto_detect_slice_size # noqa: F401
from .devices import ( # noqa: F401
CPU_DEVICE,
CUDA_DEVICE,
@@ -10,11 +11,4 @@ from .devices import ( # noqa: F401
normalize_device,
torch_dtype,
)
-from .util import ( # noqa: F401
- ask_user,
- download_with_resume,
- instantiate_from_config,
- url_attachment_name,
- Chdir,
-)
-from .attention import auto_detect_slice_size # noqa: F401
+from .util import Chdir, ask_user, download_with_resume, instantiate_from_config, url_attachment_name # noqa: F401
diff --git a/invokeai/backend/util/attention.py b/invokeai/backend/util/attention.py
index a821464394..910933044e 100644
--- a/invokeai/backend/util/attention.py
+++ b/invokeai/backend/util/attention.py
@@ -3,8 +3,8 @@
Utility routine used for autodetection of optimal slice size
for attention mechanism.
"""
-import torch
import psutil
+import torch
def auto_detect_slice_size(latents: torch.Tensor) -> str:
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index bdaf3244f3..84ca7ee02b 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -1,12 +1,13 @@
from __future__ import annotations
-from contextlib import nullcontext
-from packaging import version
import platform
+from contextlib import nullcontext
+from typing import Union
import torch
+from packaging import version
from torch import autocast
-from typing import Union
+
from invokeai.app.services.config import InvokeAIAppConfig
CPU_DEVICE = torch.device("cpu")
diff --git a/invokeai/backend/util/logging.py b/invokeai/backend/util/logging.py
index 82706d8181..accbc407f7 100644
--- a/invokeai/backend/util/logging.py
+++ b/invokeai/backend/util/logging.py
@@ -178,7 +178,6 @@ InvokeAI:
import logging.handlers
import socket
import urllib.parse
-
from abc import abstractmethod
from pathlib import Path
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index 7ef9c72fb0..0796f1a8cd 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -1,11 +1,10 @@
+import base64
import importlib
+import io
import math
import multiprocessing as mp
import os
import re
-import io
-import base64
-
from collections import abc
from inspect import isfunction
from pathlib import Path
@@ -19,6 +18,7 @@ from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
import invokeai.backend.util.logging as logger
+
from .devices import torch_dtype
diff --git a/invokeai/frontend/install/import_images.py b/invokeai/frontend/install/import_images.py
index c0db81e141..e3f9ea84f3 100644
--- a/invokeai/frontend/install/import_images.py
+++ b/invokeai/frontend/install/import_images.py
@@ -6,25 +6,25 @@
# pylint: disable=broad-exception-caught
"""Script to import images into the new database system for 3.0.0"""
-import os
import datetime
-import shutil
-import locale
-import sqlite3
-import json
import glob
+import json
+import locale
+import os
import re
+import shutil
+import sqlite3
import uuid
-import yaml
+from pathlib import Path
+
import PIL
import PIL.ImageOps
import PIL.PngImagePlugin
-
-from pathlib import Path
+import yaml
from prompt_toolkit import prompt
-from prompt_toolkit.shortcuts import message_dialog
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.shortcuts import message_dialog
from invokeai.app.services.config import InvokeAIAppConfig
diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py
index 45a0e8ec0a..87661da79f 100644
--- a/invokeai/frontend/install/invokeai_update.py
+++ b/invokeai/frontend/install/invokeai_update.py
@@ -4,6 +4,7 @@ pip install .
"""
import os
import platform
+
import pkg_resources
import psutil
import requests
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index 6e4440abef..fae67df736 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -11,6 +11,7 @@ The work is actually done in backend code in model_install_backend.py.
import argparse
import curses
+import logging
import sys
import textwrap
import traceback
@@ -20,34 +21,28 @@ from multiprocessing.connection import Connection, Pipe
from pathlib import Path
from shutil import get_terminal_size
-import logging
import npyscreen
import torch
from npyscreen import widget
-from invokeai.backend.util.logging import InvokeAILogger
-
-from invokeai.backend.install.model_install_backend import (
- InstallSelections,
- ModelInstall,
- SchedulerPredictionType,
-)
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, SchedulerPredictionType
from invokeai.backend.model_management import ModelManager, ModelType
from invokeai.backend.util import choose_precision, choose_torch_device
+from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.widgets import (
+ MIN_COLS,
+ MIN_LINES,
+ BufferBox,
CenteredTitleText,
+ CyclingForm,
MultiSelectColumns,
SingleSelectColumns,
TextBox,
- BufferBox,
- set_min_terminal_size,
- select_stable_diffusion_config_file,
- CyclingForm,
- MIN_COLS,
- MIN_LINES,
WindowTooSmallException,
+ select_stable_diffusion_config_file,
+ set_min_terminal_size,
)
-from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.getLogger()
diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py
index 9eefd93e09..06d5473fa3 100644
--- a/invokeai/frontend/install/widgets.py
+++ b/invokeai/frontend/install/widgets.py
@@ -5,16 +5,17 @@ import curses
import math
import os
import platform
-import pyperclip
import struct
import subprocess
import sys
-import npyscreen
import textwrap
-import npyscreen.wgmultiline as wgmultiline
-from npyscreen import fmPopup
-from shutil import get_terminal_size
from curses import BUTTON2_CLICKED, BUTTON3_CLICKED
+from shutil import get_terminal_size
+
+import npyscreen
+import npyscreen.wgmultiline as wgmultiline
+import pyperclip
+from npyscreen import fmPopup
# minimum size for UIs
MIN_COLS = 150
diff --git a/invokeai/frontend/legacy_launch_invokeai.py b/invokeai/frontend/legacy_launch_invokeai.py
index e1e7dc26ab..9e4cca7eac 100644
--- a/invokeai/frontend/legacy_launch_invokeai.py
+++ b/invokeai/frontend/legacy_launch_invokeai.py
@@ -1,5 +1,5 @@
-import sys
import argparse
+import sys
def main():
diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py
index ada8eed644..8fa02cb49c 100644
--- a/invokeai/frontend/merge/merge_diffusers.py
+++ b/invokeai/frontend/merge/merge_diffusers.py
@@ -16,13 +16,8 @@ from npyscreen import widget
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import (
- ModelMerger,
- ModelManager,
- ModelType,
- BaseModelType,
-)
-from invokeai.frontend.install.widgets import FloatTitleSlider, TextBox, SingleSelectColumns
+from invokeai.backend.model_management import BaseModelType, ModelManager, ModelMerger, ModelType
+from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox
config = InvokeAIAppConfig.get_config()
diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py
index 12f4db8e7b..f3911f7e0e 100755
--- a/invokeai/frontend/training/textual_inversion.py
+++ b/invokeai/frontend/training/textual_inversion.py
@@ -21,8 +21,8 @@ from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
-
from invokeai.app.services.config import InvokeAIAppConfig
+
from ...backend.training import do_textual_inversion_training, parse_args
TRAINING_DATA = "text-inversion-training-data"
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 125554fc40..c1983c6a53 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -1,40 +1,66 @@
{
"accessibility": {
- "modelSelect": "Model Select",
- "invokeProgressBar": "Invoke progress bar",
- "reset": "Reset",
- "uploadImage": "Upload Image",
- "previousImage": "Previous Image",
- "nextImage": "Next Image",
- "useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "Exit Viewer",
- "zoomIn": "Zoom In",
- "zoomOut": "Zoom Out",
- "rotateCounterClockwise": "Rotate Counter-Clockwise",
- "rotateClockwise": "Rotate Clockwise",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
+ "invokeProgressBar": "Invoke progress bar",
+ "menu": "Menu",
+ "modelSelect": "Model Select",
"modifyConfig": "Modify Config",
- "toggleAutoscroll": "Toggle autoscroll",
- "toggleLogViewer": "Toggle Log Viewer",
+ "nextImage": "Next Image",
+ "previousImage": "Previous Image",
+ "reset": "Reset",
+ "rotateClockwise": "Rotate Clockwise",
+ "rotateCounterClockwise": "Rotate Counter-Clockwise",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Side Panel",
- "menu": "Menu"
+ "toggleAutoscroll": "Toggle autoscroll",
+ "toggleLogViewer": "Toggle Log Viewer",
+ "uploadImage": "Upload Image",
+ "useThisParameter": "Use this parameter",
+ "zoomIn": "Zoom In",
+ "zoomOut": "Zoom Out"
+ },
+ "boards": {
+ "addBoard": "Add Board",
+ "autoAddBoard": "Auto-Add Board",
+ "bottomMessage": "Deleting this board and its images will reset any features currently using them.",
+ "cancel": "Cancel",
+ "changeBoard": "Change Board",
+ "clearSearch": "Clear Search",
+ "loading": "Loading...",
+ "menuItemAutoAdd": "Auto-add to this Board",
+ "move": "Move",
+ "myBoard": "My Board",
+ "noMatching": "No matching Boards",
+ "searchBoard": "Search Boards...",
+ "selectBoard": "Select a Board",
+ "topMessage": "This board contains images used in the following features:",
+ "uncategorized": "Uncategorized"
},
"common": {
+ "accept": "Accept",
+ "advanced": "Advanced",
+ "areYouSure": "Are you sure?",
+ "back": "Back",
+ "batch": "Batch Manager",
+ "cancel": "Cancel",
+ "close": "Close",
"communityLabel": "Community",
- "hotkeysLabel": "Hotkeys",
+ "controlNet": "Controlnet",
"darkMode": "Dark Mode",
- "lightMode": "Light Mode",
- "languagePickerLabel": "Language",
- "reportBugLabel": "Report Bug",
- "githubLabel": "Github",
"discordLabel": "Discord",
- "settingsLabel": "Settings",
+ "dontAskMeAgain": "Don't ask me again",
+ "generate": "Generate",
+ "githubLabel": "Github",
+ "hotkeysLabel": "Hotkeys",
+ "imagePrompt": "Image Prompt",
+ "img2img": "Image To Image",
"langArabic": "العربية",
- "langEnglish": "English",
+ "langBrPortuguese": "Português do Brasil",
"langDutch": "Nederlands",
+ "langEnglish": "English",
"langFrench": "Français",
"langGerman": "Deutsch",
"langHebrew": "עברית",
@@ -43,377 +69,426 @@
"langKorean": "한국어",
"langPolish": "Polski",
"langPortuguese": "Português",
- "langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
- "langUkranian": "Украї́нська",
"langSpanish": "Español",
- "txt2img": "Text To Image",
- "img2img": "Image To Image",
- "unifiedCanvas": "Unified Canvas",
+ "languagePickerLabel": "Language",
+ "langUkranian": "Украї́нська",
+ "lightMode": "Light Mode",
"linear": "Linear",
- "nodes": "Workflow Editor",
- "batch": "Batch Manager",
+ "load": "Load",
+ "loading": "Loading",
+ "loadingInvokeAI": "Loading Invoke AI",
"modelManager": "Model Manager",
- "postprocessing": "Post Processing",
+ "nodeEditor": "Node Editor",
+ "nodes": "Workflow Editor",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
- "postProcessing": "Post Processing",
+ "openInNewTab": "Open in New Tab",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
- "training": "Training",
- "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
- "trainingDesc2": "InvokeAI already supports training custom embeddourings using Textual Inversion using the main script.",
- "upload": "Upload",
- "close": "Close",
- "cancel": "Cancel",
- "accept": "Accept",
- "load": "Load",
- "back": "Back",
+ "postprocessing": "Post Processing",
+ "postProcessing": "Post Processing",
+ "random": "Random",
+ "reportBugLabel": "Report Bug",
+ "settingsLabel": "Settings",
"statusConnected": "Connected",
+ "statusConvertingModel": "Converting Model",
"statusDisconnected": "Disconnected",
"statusError": "Error",
- "statusPreparing": "Preparing",
- "statusProcessingCanceled": "Processing Canceled",
- "statusProcessingComplete": "Processing Complete",
"statusGenerating": "Generating",
- "statusGeneratingTextToImage": "Generating Text To Image",
"statusGeneratingImageToImage": "Generating Image To Image",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
+ "statusGeneratingTextToImage": "Generating Text To Image",
"statusGenerationComplete": "Generation Complete",
"statusIterationComplete": "Iteration Complete",
- "statusSavingImage": "Saving Image",
+ "statusLoadingModel": "Loading Model",
+ "statusMergedModels": "Models Merged",
+ "statusMergingModels": "Merging Models",
+ "statusModelChanged": "Model Changed",
+ "statusModelConverted": "Model Converted",
+ "statusPreparing": "Preparing",
+ "statusProcessingCanceled": "Processing Canceled",
+ "statusProcessingComplete": "Processing Complete",
"statusRestoringFaces": "Restoring Faces",
- "statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
+ "statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
+ "statusSavingImage": "Saving Image",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
- "statusLoadingModel": "Loading Model",
- "statusModelChanged": "Model Changed",
- "statusConvertingModel": "Converting Model",
- "statusModelConverted": "Model Converted",
- "statusMergingModels": "Merging Models",
- "statusMergedModels": "Models Merged",
- "loading": "Loading",
- "loadingInvokeAI": "Loading Invoke AI",
- "random": "Random",
- "generate": "Generate",
- "openInNewTab": "Open in New Tab",
- "dontAskMeAgain": "Don't ask me again",
- "areYouSure": "Are you sure?",
- "imagePrompt": "Image Prompt"
+ "training": "Training",
+ "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
+    "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
+ "txt2img": "Text To Image",
+ "unifiedCanvas": "Unified Canvas",
+ "upload": "Upload"
+ },
+ "controlnet": {
+ "amult": "a_mult",
+ "autoConfigure": "Auto configure processor",
+ "balanced": "Balanced",
+ "beginEndStepPercent": "Begin / End Step Percentage",
+ "bgth": "bg_th",
+ "canny": "Canny",
+ "cannyDescription": "Canny edge detection",
+ "coarse": "Coarse",
+ "contentShuffle": "Content Shuffle",
+ "contentShuffleDescription": "Shuffles the content in an image",
+ "control": "Control",
+ "controlMode": "Control Mode",
+ "crop": "Crop",
+ "delete": "Delete",
+ "depthMidas": "Depth (Midas)",
+ "depthMidasDescription": "Depth map generation using Midas",
+ "depthZoe": "Depth (Zoe)",
+ "depthZoeDescription": "Depth map generation using Zoe",
+ "detectResolution": "Detect Resolution",
+ "duplicate": "Duplicate",
+ "enableControlnet": "Enable ControlNet",
+ "f": "F",
+ "fill": "Fill",
+ "h": "H",
+ "handAndFace": "Hand and Face",
+ "hed": "HED",
+ "hedDescription": "Holistically-Nested Edge Detection",
+ "hideAdvanced": "Hide Advanced",
+ "highThreshold": "High Threshold",
+ "imageResolution": "Image Resolution",
+ "importImageFromCanvas": "Import Image From Canvas",
+ "importMaskFromCanvas": "Import Mask From Canvas",
+ "incompatibleBaseModel": "Incompatible base model:",
+ "lineart": "Lineart",
+ "lineartAnime": "Lineart Anime",
+ "lineartAnimeDescription": "Anime-style lineart processing",
+ "lineartDescription": "Converts image to lineart",
+ "lowThreshold": "Low Threshold",
+ "maxFaces": "Max Faces",
+ "mediapipeFace": "Mediapipe Face",
+ "mediapipeFaceDescription": "Face detection using Mediapipe",
+ "megaControl": "Mega Control",
+ "minConfidence": "Min Confidence",
+ "mlsd": "M-LSD",
+ "mlsdDescription": "Minimalist Line Segment Detector",
+ "none": "None",
+ "noneDescription": "No processing applied",
+ "normalBae": "Normal BAE",
+ "normalBaeDescription": "Normal BAE processing",
+ "openPose": "Openpose",
+ "openPoseDescription": "Human pose estimation using Openpose",
+ "pidi": "PIDI",
+ "pidiDescription": "PIDI image processing",
+ "processor": "Processor",
+ "prompt": "Prompt",
+ "resetControlImage": "Reset Control Image",
+ "resize": "Resize",
+ "resizeMode": "Resize Mode",
+ "safe": "Safe",
+ "saveControlImage": "Save Control Image",
+ "scribble": "scribble",
+ "selectModel": "Select a model",
+ "setControlImageDimensions": "Set Control Image Dimensions To W/H",
+ "showAdvanced": "Show Advanced",
+ "toggleControlNet": "Toggle this ControlNet",
+ "w": "W",
+ "weight": "Weight"
+ },
+ "embedding": {
+ "addEmbedding": "Add Embedding",
+ "incompatibleModel": "Incompatible base model:",
+ "noMatchingEmbedding": "No matching Embeddings"
},
"gallery": {
- "generations": "Generations",
- "showGenerations": "Show Generations",
- "uploads": "Uploads",
- "showUploads": "Show Uploads",
- "galleryImageSize": "Image Size",
- "galleryImageResetSize": "Reset Size",
- "gallerySettings": "Gallery Settings",
- "maintainAspectRatio": "Maintain Aspect Ratio",
- "autoSwitchNewImages": "Auto-Switch to New Images",
- "singleColumnLayout": "Single Column Layout",
"allImagesLoaded": "All Images Loaded",
- "loadMore": "Load More",
- "noImagesInGallery": "No Images to Display",
+ "assets": "Assets",
+ "autoAssignBoardOnClick": "Auto-Assign Board on Click",
+ "autoSwitchNewImages": "Auto-Switch to New Images",
+ "copy": "Copy",
+ "currentlyInUse": "This image is currently in use in the following features:",
"deleteImage": "Delete Image",
"deleteImageBin": "Deleted images will be sent to your operating system's Bin.",
"deleteImagePermanent": "Deleted images cannot be restored.",
+ "download": "Download",
+ "featuresWillReset": "If you delete this image, those features will immediately be reset.",
+ "galleryImageResetSize": "Reset Size",
+ "galleryImageSize": "Image Size",
+ "gallerySettings": "Gallery Settings",
+ "generations": "Generations",
"images": "Images",
- "assets": "Assets",
- "autoAssignBoardOnClick": "Auto-Assign Board on Click"
+ "loading": "Loading",
+ "loadMore": "Load More",
+ "maintainAspectRatio": "Maintain Aspect Ratio",
+ "noImagesInGallery": "No Images to Display",
+ "setCurrentImage": "Set as Current Image",
+ "showGenerations": "Show Generations",
+ "showUploads": "Show Uploads",
+ "singleColumnLayout": "Single Column Layout",
+ "unableToLoad": "Unable to load Gallery",
+ "uploads": "Uploads"
},
"hotkeys": {
- "keyboardShortcuts": "Keyboard Shortcuts",
- "appHotkeys": "App Hotkeys",
- "generalHotkeys": "General Hotkeys",
- "galleryHotkeys": "Gallery Hotkeys",
- "unifiedCanvasHotkeys": "Unified Canvas Hotkeys",
- "nodesHotkeys": "Nodes Hotkeys",
- "invoke": {
- "title": "Invoke",
- "desc": "Generate an image"
- },
- "cancel": {
- "title": "Cancel",
- "desc": "Cancel image generation"
- },
- "focusPrompt": {
- "title": "Focus Prompt",
- "desc": "Focus the prompt input area"
- },
- "toggleOptions": {
- "title": "Toggle Options",
- "desc": "Open and close the options panel"
- },
- "pinOptions": {
- "title": "Pin Options",
- "desc": "Pin the options panel"
- },
- "toggleViewer": {
- "title": "Toggle Viewer",
- "desc": "Open and close Image Viewer"
- },
- "toggleGallery": {
- "title": "Toggle Gallery",
- "desc": "Open and close the gallery drawer"
- },
- "maximizeWorkSpace": {
- "title": "Maximize Workspace",
- "desc": "Close panels and maximize work area"
- },
- "changeTabs": {
- "title": "Change Tabs",
- "desc": "Switch to another workspace"
- },
- "consoleToggle": {
- "title": "Console Toggle",
- "desc": "Open and close console"
- },
- "setPrompt": {
- "title": "Set Prompt",
- "desc": "Use the prompt of the current image"
- },
- "setSeed": {
- "title": "Set Seed",
- "desc": "Use the seed of the current image"
- },
- "setParameters": {
- "title": "Set Parameters",
- "desc": "Use all parameters of the current image"
- },
- "restoreFaces": {
- "title": "Restore Faces",
- "desc": "Restore the current image"
- },
- "upscale": {
- "title": "Upscale",
- "desc": "Upscale the current image"
- },
- "showInfo": {
- "title": "Show Info",
- "desc": "Show metadata info of the current image"
- },
- "sendToImageToImage": {
- "title": "Send To Image To Image",
- "desc": "Send current image to Image to Image"
- },
- "deleteImage": {
- "title": "Delete Image",
- "desc": "Delete the current image"
- },
- "closePanels": {
- "title": "Close Panels",
- "desc": "Closes open panels"
- },
- "previousImage": {
- "title": "Previous Image",
- "desc": "Display the previous image in gallery"
- },
- "nextImage": {
- "title": "Next Image",
- "desc": "Display the next image in gallery"
- },
- "toggleGalleryPin": {
- "title": "Toggle Gallery Pin",
- "desc": "Pins and unpins the gallery to the UI"
- },
- "increaseGalleryThumbSize": {
- "title": "Increase Gallery Image Size",
- "desc": "Increases gallery thumbnails size"
- },
- "decreaseGalleryThumbSize": {
- "title": "Decrease Gallery Image Size",
- "desc": "Decreases gallery thumbnails size"
- },
- "selectBrush": {
- "title": "Select Brush",
- "desc": "Selects the canvas brush"
- },
- "selectEraser": {
- "title": "Select Eraser",
- "desc": "Selects the canvas eraser"
- },
- "decreaseBrushSize": {
- "title": "Decrease Brush Size",
- "desc": "Decreases the size of the canvas brush/eraser"
- },
- "increaseBrushSize": {
- "title": "Increase Brush Size",
- "desc": "Increases the size of the canvas brush/eraser"
- },
- "decreaseBrushOpacity": {
- "title": "Decrease Brush Opacity",
- "desc": "Decreases the opacity of the canvas brush"
- },
- "increaseBrushOpacity": {
- "title": "Increase Brush Opacity",
- "desc": "Increases the opacity of the canvas brush"
- },
- "moveTool": {
- "title": "Move Tool",
- "desc": "Allows canvas navigation"
- },
- "fillBoundingBox": {
- "title": "Fill Bounding Box",
- "desc": "Fills the bounding box with brush color"
- },
- "eraseBoundingBox": {
- "title": "Erase Bounding Box",
- "desc": "Erases the bounding box area"
- },
- "colorPicker": {
- "title": "Select Color Picker",
- "desc": "Selects the canvas color picker"
- },
- "toggleSnap": {
- "title": "Toggle Snap",
- "desc": "Toggles Snap to Grid"
- },
- "quickToggleMove": {
- "title": "Quick Toggle Move",
- "desc": "Temporarily toggles Move mode"
- },
- "toggleLayer": {
- "title": "Toggle Layer",
- "desc": "Toggles mask/base layer selection"
- },
- "clearMask": {
- "title": "Clear Mask",
- "desc": "Clear the entire mask"
- },
- "hideMask": {
- "title": "Hide Mask",
- "desc": "Hide and unhide mask"
- },
- "showHideBoundingBox": {
- "title": "Show/Hide Bounding Box",
- "desc": "Toggle visibility of bounding box"
- },
- "mergeVisible": {
- "title": "Merge Visible",
- "desc": "Merge all visible layers of canvas"
- },
- "saveToGallery": {
- "title": "Save To Gallery",
- "desc": "Save current canvas to gallery"
- },
- "copyToClipboard": {
- "title": "Copy to Clipboard",
- "desc": "Copy current canvas to clipboard"
- },
- "downloadImage": {
- "title": "Download Image",
- "desc": "Download current canvas"
- },
- "undoStroke": {
- "title": "Undo Stroke",
- "desc": "Undo a brush stroke"
- },
- "redoStroke": {
- "title": "Redo Stroke",
- "desc": "Redo a brush stroke"
- },
- "resetView": {
- "title": "Reset View",
- "desc": "Reset Canvas View"
- },
- "previousStagingImage": {
- "title": "Previous Staging Image",
- "desc": "Previous Staging Area Image"
- },
- "nextStagingImage": {
- "title": "Next Staging Image",
- "desc": "Next Staging Area Image"
- },
"acceptStagingImage": {
- "title": "Accept Staging Image",
- "desc": "Accept Current Staging Area Image"
+ "desc": "Accept Current Staging Area Image",
+ "title": "Accept Staging Image"
},
"addNodes": {
- "title": "Add Nodes",
- "desc": "Opens the add node menu"
+ "desc": "Opens the add node menu",
+ "title": "Add Nodes"
+ },
+ "appHotkeys": "App Hotkeys",
+ "cancel": {
+ "desc": "Cancel image generation",
+ "title": "Cancel"
+ },
+ "changeTabs": {
+ "desc": "Switch to another workspace",
+ "title": "Change Tabs"
+ },
+ "clearMask": {
+ "desc": "Clear the entire mask",
+ "title": "Clear Mask"
+ },
+ "closePanels": {
+ "desc": "Closes open panels",
+ "title": "Close Panels"
+ },
+ "colorPicker": {
+ "desc": "Selects the canvas color picker",
+ "title": "Select Color Picker"
+ },
+ "consoleToggle": {
+ "desc": "Open and close console",
+ "title": "Console Toggle"
+ },
+ "copyToClipboard": {
+ "desc": "Copy current canvas to clipboard",
+ "title": "Copy to Clipboard"
+ },
+ "decreaseBrushOpacity": {
+ "desc": "Decreases the opacity of the canvas brush",
+ "title": "Decrease Brush Opacity"
+ },
+ "decreaseBrushSize": {
+ "desc": "Decreases the size of the canvas brush/eraser",
+ "title": "Decrease Brush Size"
+ },
+ "decreaseGalleryThumbSize": {
+ "desc": "Decreases gallery thumbnails size",
+ "title": "Decrease Gallery Image Size"
+ },
+ "deleteImage": {
+ "desc": "Delete the current image",
+ "title": "Delete Image"
+ },
+ "downloadImage": {
+ "desc": "Download current canvas",
+ "title": "Download Image"
+ },
+ "eraseBoundingBox": {
+ "desc": "Erases the bounding box area",
+ "title": "Erase Bounding Box"
+ },
+ "fillBoundingBox": {
+ "desc": "Fills the bounding box with brush color",
+ "title": "Fill Bounding Box"
+ },
+ "focusPrompt": {
+ "desc": "Focus the prompt input area",
+ "title": "Focus Prompt"
+ },
+ "galleryHotkeys": "Gallery Hotkeys",
+ "generalHotkeys": "General Hotkeys",
+ "hideMask": {
+ "desc": "Hide and unhide mask",
+ "title": "Hide Mask"
+ },
+ "increaseBrushOpacity": {
+ "desc": "Increases the opacity of the canvas brush",
+ "title": "Increase Brush Opacity"
+ },
+ "increaseBrushSize": {
+ "desc": "Increases the size of the canvas brush/eraser",
+ "title": "Increase Brush Size"
+ },
+ "increaseGalleryThumbSize": {
+ "desc": "Increases gallery thumbnails size",
+ "title": "Increase Gallery Image Size"
+ },
+ "invoke": {
+ "desc": "Generate an image",
+ "title": "Invoke"
+ },
+ "keyboardShortcuts": "Keyboard Shortcuts",
+ "maximizeWorkSpace": {
+ "desc": "Close panels and maximize work area",
+ "title": "Maximize Workspace"
+ },
+ "mergeVisible": {
+ "desc": "Merge all visible layers of canvas",
+ "title": "Merge Visible"
+ },
+ "moveTool": {
+ "desc": "Allows canvas navigation",
+ "title": "Move Tool"
+ },
+ "nextImage": {
+ "desc": "Display the next image in gallery",
+ "title": "Next Image"
+ },
+ "nextStagingImage": {
+ "desc": "Next Staging Area Image",
+ "title": "Next Staging Image"
+ },
+ "nodesHotkeys": "Nodes Hotkeys",
+ "pinOptions": {
+ "desc": "Pin the options panel",
+ "title": "Pin Options"
+ },
+ "previousImage": {
+ "desc": "Display the previous image in gallery",
+ "title": "Previous Image"
+ },
+ "previousStagingImage": {
+ "desc": "Previous Staging Area Image",
+ "title": "Previous Staging Image"
+ },
+ "quickToggleMove": {
+ "desc": "Temporarily toggles Move mode",
+ "title": "Quick Toggle Move"
+ },
+ "redoStroke": {
+ "desc": "Redo a brush stroke",
+ "title": "Redo Stroke"
+ },
+ "resetView": {
+ "desc": "Reset Canvas View",
+ "title": "Reset View"
+ },
+ "restoreFaces": {
+ "desc": "Restore the current image",
+ "title": "Restore Faces"
+ },
+ "saveToGallery": {
+ "desc": "Save current canvas to gallery",
+ "title": "Save To Gallery"
+ },
+ "selectBrush": {
+ "desc": "Selects the canvas brush",
+ "title": "Select Brush"
+ },
+ "selectEraser": {
+ "desc": "Selects the canvas eraser",
+ "title": "Select Eraser"
+ },
+ "sendToImageToImage": {
+ "desc": "Send current image to Image to Image",
+ "title": "Send To Image To Image"
+ },
+ "setParameters": {
+ "desc": "Use all parameters of the current image",
+ "title": "Set Parameters"
+ },
+ "setPrompt": {
+ "desc": "Use the prompt of the current image",
+ "title": "Set Prompt"
+ },
+ "setSeed": {
+ "desc": "Use the seed of the current image",
+ "title": "Set Seed"
+ },
+ "showHideBoundingBox": {
+ "desc": "Toggle visibility of bounding box",
+ "title": "Show/Hide Bounding Box"
+ },
+ "showInfo": {
+ "desc": "Show metadata info of the current image",
+ "title": "Show Info"
+ },
+ "toggleGallery": {
+ "desc": "Open and close the gallery drawer",
+ "title": "Toggle Gallery"
+ },
+ "toggleGalleryPin": {
+ "desc": "Pins and unpins the gallery to the UI",
+ "title": "Toggle Gallery Pin"
+ },
+ "toggleLayer": {
+ "desc": "Toggles mask/base layer selection",
+ "title": "Toggle Layer"
+ },
+ "toggleOptions": {
+ "desc": "Open and close the options panel",
+ "title": "Toggle Options"
+ },
+ "toggleSnap": {
+ "desc": "Toggles Snap to Grid",
+ "title": "Toggle Snap"
+ },
+ "toggleViewer": {
+ "desc": "Open and close Image Viewer",
+ "title": "Toggle Viewer"
+ },
+ "undoStroke": {
+ "desc": "Undo a brush stroke",
+ "title": "Undo Stroke"
+ },
+ "unifiedCanvasHotkeys": "Unified Canvas Hotkeys",
+ "upscale": {
+ "desc": "Upscale the current image",
+ "title": "Upscale"
}
},
- "modelManager": {
- "modelManager": "Model Manager",
+ "metadata": {
+ "cfgScale": "CFG scale",
+ "createdBy": "Created By",
+ "fit": "Image to image fit",
+ "generationMode": "Generation Mode",
+ "height": "Height",
+ "hiresFix": "High Resolution Optimization",
+ "imageDetails": "Image Details",
+ "initImage": "Initial image",
+ "metadata": "Metadata",
"model": "Model",
- "vae": "VAE",
- "allModels": "All Models",
- "checkpointModels": "Checkpoints",
- "diffusersModels": "Diffusers",
- "loraModels": "LoRAs",
- "safetensorModels": "SafeTensors",
- "onnxModels": "Onnx",
- "oliveModels": "Olives",
- "modelAdded": "Model Added",
- "modelUpdated": "Model Updated",
- "modelUpdateFailed": "Model Update Failed",
- "modelEntryDeleted": "Model Entry Deleted",
- "cannotUseSpaces": "Cannot Use Spaces",
+ "negativePrompt": "Negative Prompt",
+ "noImageDetails": "No image details found",
+ "noMetaData": "No metadata found",
+ "perlin": "Perlin Noise",
+ "positivePrompt": "Positive Prompt",
+ "scheduler": "Scheduler",
+ "seamless": "Seamless",
+ "seed": "Seed",
+ "steps": "Steps",
+ "strength": "Image to image strength",
+ "Threshold": "Noise Threshold",
+ "variations": "Seed-weight pairs",
+ "width": "Width",
+ "workflow": "Workflow"
+ },
+ "modelManager": {
+ "active": "active",
+ "addCheckpointModel": "Add Checkpoint / Safetensor Model",
+ "addDifference": "Add Difference",
+ "addDiffuserModel": "Add Diffusers",
+ "addManually": "Add Manually",
+ "addModel": "Add Model",
"addNew": "Add New",
"addNewModel": "Add New Model",
- "addCheckpointModel": "Add Checkpoint / Safetensor Model",
- "addDiffuserModel": "Add Diffusers",
- "scanForModels": "Scan For Models",
- "addManually": "Add Manually",
- "manual": "Manual",
+ "addSelected": "Add Selected",
+ "advanced": "Advanced",
+ "allModels": "All Models",
+ "alpha": "Alpha",
+ "availableModels": "Available Models",
"baseModel": "Base Model",
- "name": "Name",
- "nameValidationMsg": "Enter a name for your model",
- "description": "Description",
- "descriptionValidationMsg": "Add a description for your model",
+ "cached": "cached",
+ "cannotUseSpaces": "Cannot Use Spaces",
+ "checkpointFolder": "Checkpoint Folder",
+ "checkpointModels": "Checkpoints",
+ "clearCheckpointFolder": "Clear Checkpoint Folder",
+ "closeAdvanced": "Close Advanced",
"config": "Config",
"configValidationMsg": "Path to the config file of your model.",
- "modelLocation": "Model Location",
- "modelLocationValidationMsg": "Path to where your model is located locally.",
- "repo_id": "Repo ID",
- "repoIDValidationMsg": "Online repository of your model",
- "vaeLocation": "VAE Location",
- "vaeLocationValidationMsg": "Path to where your VAE is located.",
- "variant": "Variant",
- "vaeRepoID": "VAE Repo ID",
- "vaeRepoIDValidationMsg": "Online repository of your VAE",
- "width": "Width",
- "widthValidationMsg": "Default width of your model.",
- "height": "Height",
- "heightValidationMsg": "Default height of your model.",
- "addModel": "Add Model",
- "updateModel": "Update Model",
- "availableModels": "Available Models",
- "search": "Search",
- "load": "Load",
- "active": "active",
- "notLoaded": "not loaded",
- "cached": "cached",
- "checkpointFolder": "Checkpoint Folder",
- "clearCheckpointFolder": "Clear Checkpoint Folder",
- "findModels": "Find Models",
- "scanAgain": "Scan Again",
- "modelsFound": "Models Found",
- "selectFolder": "Select Folder",
- "selected": "Selected",
- "selectAll": "Select All",
- "deselectAll": "Deselect All",
- "showExisting": "Show Existing",
- "addSelected": "Add Selected",
- "modelExists": "Model Exists",
- "selectAndAdd": "Select and Add Models Listed Below",
- "noModelsFound": "No Models Found",
- "delete": "Delete",
- "deleteModel": "Delete Model",
- "deleteConfig": "Delete Config",
- "deleteMsg1": "Are you sure you want to delete this model from InvokeAI?",
- "modelDeleted": "Model Deleted",
- "modelDeleteFailed": "Failed to delete model",
- "deleteMsg2": "This WILL delete the model from disk if it is in the InvokeAI root folder. If you are using a custom location, then the model WILL NOT be deleted from disk.",
- "formMessageDiffusersModelLocation": "Diffusers Model Location",
- "formMessageDiffusersModelLocationDesc": "Please enter at least one.",
- "formMessageDiffusersVAELocation": "VAE Location",
- "formMessageDiffusersVAELocationDesc": "If not provided, InvokeAI will look for the VAE file inside the model location given above.",
"convert": "Convert",
+ "convertingModelBegin": "Converting Model. Please wait.",
"convertToDiffusers": "Convert To Diffusers",
"convertToDiffusersHelpText1": "This model will be converted to the 🧨 Diffusers format.",
"convertToDiffusersHelpText2": "This process will replace your Model Manager entry with the Diffusers version of the same model.",
@@ -422,318 +497,492 @@
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"convertToDiffusersSaveLocation": "Save Location",
- "noCustomLocationProvided": "No Custom Location Provided",
- "convertingModelBegin": "Converting Model. Please wait.",
- "v1": "v1",
- "v2_base": "v2 (512px)",
- "v2_768": "v2 (768px)",
- "inpainting": "v1 Inpainting",
- "customConfig": "Custom Config",
- "pathToCustomConfig": "Path To Custom Config",
- "statusConverting": "Converting",
- "modelConverted": "Model Converted",
- "modelConversionFailed": "Model Conversion Failed",
- "sameFolder": "Same folder",
- "invokeRoot": "InvokeAI folder",
"custom": "Custom",
+ "customConfig": "Custom Config",
+ "customConfigFileLocation": "Custom Config File Location",
"customSaveLocation": "Custom Save Location",
- "merge": "Merge",
- "modelsMerged": "Models Merged",
- "modelsMergeFailed": "Model Merge Failed",
- "mergeModels": "Merge Models",
- "modelOne": "Model 1",
- "modelTwo": "Model 2",
- "modelThree": "Model 3",
- "mergedModelName": "Merged Model Name",
- "alpha": "Alpha",
- "interpolationType": "Interpolation Type",
- "mergedModelSaveLocation": "Save Location",
- "mergedModelCustomSaveLocation": "Custom Path",
- "invokeAIFolder": "Invoke AI Folder",
+ "delete": "Delete",
+ "deleteConfig": "Delete Config",
+ "deleteModel": "Delete Model",
+ "deleteMsg1": "Are you sure you want to delete this model from InvokeAI?",
+ "deleteMsg2": "This WILL delete the model from disk if it is in the InvokeAI root folder. If you are using a custom location, then the model WILL NOT be deleted from disk.",
+ "description": "Description",
+ "descriptionValidationMsg": "Add a description for your model",
+ "deselectAll": "Deselect All",
+ "diffusersModels": "Diffusers",
+ "findModels": "Find Models",
+ "formMessageDiffusersModelLocation": "Diffusers Model Location",
+ "formMessageDiffusersModelLocationDesc": "Please enter at least one.",
+ "formMessageDiffusersVAELocation": "VAE Location",
+ "formMessageDiffusersVAELocationDesc": "If not provided, InvokeAI will look for the VAE file inside the model location given above.",
+ "height": "Height",
+ "heightValidationMsg": "Default height of your model.",
"ignoreMismatch": "Ignore Mismatches Between Selected Models",
+ "importModels": "Import Models",
+ "inpainting": "v1 Inpainting",
+ "interpolationType": "Interpolation Type",
+ "inverseSigmoid": "Inverse Sigmoid",
+ "invokeAIFolder": "Invoke AI Folder",
+ "invokeRoot": "InvokeAI folder",
+ "load": "Load",
+ "loraModels": "LoRAs",
+ "manual": "Manual",
+ "merge": "Merge",
+ "mergedModelCustomSaveLocation": "Custom Path",
+ "mergedModelName": "Merged Model Name",
+ "mergedModelSaveLocation": "Save Location",
+ "mergeModels": "Merge Models",
+ "model": "Model",
+ "modelAdded": "Model Added",
+ "modelConversionFailed": "Model Conversion Failed",
+ "modelConverted": "Model Converted",
+ "modelDeleted": "Model Deleted",
+ "modelDeleteFailed": "Failed to delete model",
+ "modelEntryDeleted": "Model Entry Deleted",
+ "modelExists": "Model Exists",
+ "modelLocation": "Model Location",
+ "modelLocationValidationMsg": "Provide the path to a local folder where your Diffusers Model is stored",
+ "modelManager": "Model Manager",
+ "modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
"modelMergeHeaderHelp1": "You can merge up to three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
- "modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
- "inverseSigmoid": "Inverse Sigmoid",
- "sigmoid": "Sigmoid",
- "weightedSum": "Weighted Sum",
+ "modelOne": "Model 1",
+ "modelsFound": "Models Found",
+ "modelsMerged": "Models Merged",
+ "modelsMergeFailed": "Model Merge Failed",
+ "modelsSynced": "Models Synced",
+ "modelSyncFailed": "Model Sync Failed",
+ "modelThree": "Model 3",
+ "modelTwo": "Model 2",
+ "modelType": "Model Type",
+ "modelUpdated": "Model Updated",
+ "modelUpdateFailed": "Model Update Failed",
+ "name": "Name",
+ "nameValidationMsg": "Enter a name for your model",
+ "noCustomLocationProvided": "No Custom Location Provided",
+ "noModels": "No Models Found",
+ "noModelsFound": "No Models Found",
"none": "none",
- "addDifference": "Add Difference",
+ "notLoaded": "not loaded",
+ "oliveModels": "Olives",
+ "onnxModels": "Onnx",
+ "pathToCustomConfig": "Path To Custom Config",
"pickModelType": "Pick Model Type",
+ "predictionType": "Prediction Type (for Stable Diffusion 2.x Models only)",
+ "quickAdd": "Quick Add",
+ "repo_id": "Repo ID",
+ "repoIDValidationMsg": "Online repository of your model",
+ "safetensorModels": "SafeTensors",
+ "sameFolder": "Same folder",
+ "scanAgain": "Scan Again",
+ "scanForModels": "Scan For Models",
+ "search": "Search",
+ "selectAll": "Select All",
+ "selectAndAdd": "Select and Add Models Listed Below",
+ "selected": "Selected",
+ "selectFolder": "Select Folder",
"selectModel": "Select Model",
- "importModels": "Import Models",
"settings": "Settings",
+ "showExisting": "Show Existing",
+ "sigmoid": "Sigmoid",
+    "simpleModelDesc": "Provide a path to a local Diffusers model, local checkpoint / safetensors model, a HuggingFace Repo ID, or a checkpoint/diffusers model URL.",
+ "statusConverting": "Converting",
"syncModels": "Sync Models",
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
- "modelsSynced": "Models Synced",
- "modelSyncFailed": "Model Sync Failed"
+ "updateModel": "Update Model",
+ "useCustomConfig": "Use Custom Config",
+ "v1": "v1",
+ "v2_768": "v2 (768px)",
+ "v2_base": "v2 (512px)",
+ "vae": "VAE",
+ "vaeLocation": "VAE Location",
+ "vaeLocationValidationMsg": "Path to where your VAE is located.",
+ "vaeRepoID": "VAE Repo ID",
+ "vaeRepoIDValidationMsg": "Online repository of your VAE",
+ "variant": "Variant",
+ "weightedSum": "Weighted Sum",
+ "width": "Width",
+ "widthValidationMsg": "Default width of your model."
+ },
+ "models": {
+ "loading": "loading",
+ "noLoRAsAvailable": "No LoRAs available",
+ "noMatchingLoRAs": "No matching LoRAs",
+ "noMatchingModels": "No matching Models",
+    "noModelsAvailable": "No Models available",
+ "selectLoRA": "Select a LoRA",
+ "selectModel": "Select a Model"
+ },
+ "nodes": {
+ "addNode": "Add Node",
+ "addNodeToolTip": "Add Node (Shift+A, Space)",
+ "animatedEdges": "Animated Edges",
+ "animatedEdgesHelp": "Animate selected edges and edges connected to selected nodes",
+ "cannotConnectInputToInput": "Cannot connect input to input",
+ "cannotConnectOutputToOutput": "Cannot connect output to output",
+ "cannotConnectToSelf": "Cannot connect to self",
+ "colorCodeEdges": "Color-Code Edges",
+ "colorCodeEdgesHelp": "Color-code edges according to their connected fields",
+ "connectionWouldCreateCycle": "Connection would create a cycle",
+ "currentImage": "Current Image",
+ "currentImageDescription": "Displays the current image in the Node Editor",
+ "downloadWorkflow": "Download Workflow JSON",
+ "fieldTypesMustMatch": "Field types must match",
+ "fitViewportNodes": "Fit View",
+ "fullyContainNodes": "Fully Contain Nodes to Select",
+ "fullyContainNodesHelp": "Nodes must be fully inside the selection box to be selected",
+ "hideGraphNodes": "Hide Graph Overlay",
+ "hideLegendNodes": "Hide Field Type Legend",
+ "hideMinimapnodes": "Hide MiniMap",
+ "inputMayOnlyHaveOneConnection": "Input may only have one connection",
+ "loadingNodes": "Loading Nodes...",
+ "loadWorkflow": "Load Workflow",
+ "noConnectionData": "No connection data",
+ "noConnectionInProgress": "No connection in progress",
+ "nodeOutputs": "Node Outputs",
+ "nodeSearch": "Search for nodes",
+ "nodeTemplate": "Node Template",
+ "noFieldsLinearview": "No fields added to Linear View",
+ "noFieldType": "No field type",
+ "noMatchingNodes": "No matching nodes",
+ "noNodeSelected": "No node selected",
+ "noOpacity": "Node Opacity",
+ "noOutputRecorded": "No outputs recorded",
+ "notes": "Notes",
+ "notesDescription": "Add notes about your workflow",
+ "pickOne": "Pick One",
+ "problemSettingTitle": "Problem Setting Title",
+ "reloadNodeTemplates": "Reload Node Templates",
+ "removeLinearView": "Remove from Linear View",
+ "resetWorkflow": "Reset Workflow",
+ "resetWorkflowDesc": "Are you sure you want to reset this workflow?",
+ "resetWorkflowDesc2": "Resetting the workflow will clear all nodes, edges and workflow details.",
+ "showGraphNodes": "Show Graph Overlay",
+ "showLegendNodes": "Show Field Type Legend",
+ "showMinimapnodes": "Show MiniMap",
+ "snapToGrid": "Snap to Grid",
+ "snapToGridHelp": "Snap nodes to grid when moved",
+    "unableToLoadWorkflow": "Unable to Load Workflow",
+ "unableToValidateWorkflow": "Unable to Validate Workflow",
+ "unknownField": "Unknown Field",
+ "unkownInvocation": "Unknown Invocation type",
+ "validateConnections": "Validate Connections and Graph",
+ "validateConnectionsHelp": "Prevent invalid connections from being made, and invalid graphs from being invoked",
+ "workflow": "Workflow",
+ "workflowAuthor": "Author",
+ "workflowContact": "Contact",
+ "workflowDescription": "Short Description",
+ "workflowName": "Name",
+ "workflowNotes": "Notes",
+ "workflowSettings": "Workflow Editor Settings",
+ "workflowTags": "Tags",
+ "workflowValidation": "Workflow Validation Error",
+ "workflowVersion": "Version",
+ "zoomInNodes": "Zoom In",
+ "zoomOutNodes": "Zoom Out",
+ "executionStateError": "Error",
+ "executionStateCompleted": "Completed",
+ "executionStateInProgress": "In Progress",
+ "versionUnknown": " Version Unknown",
+ "unknownNode": "Unknown Node",
+ "version": "Version",
+ "updateApp": "Update App",
+ "unknownTemplate": "Unknown Template"
},
"parameters": {
- "general": "General",
- "images": "Images",
- "steps": "Steps",
- "cfgScale": "CFG Scale",
- "width": "Width",
- "height": "Height",
- "scheduler": "Scheduler",
- "seed": "Seed",
- "boundingBoxWidth": "Bounding Box Width",
+ "aspectRatio": "Ratio",
+ "boundingBoxHeader": "Bounding Box",
"boundingBoxHeight": "Bounding Box Height",
- "imageToImage": "Image to Image",
- "randomizeSeed": "Randomize Seed",
- "shuffle": "Shuffle Seed",
- "noiseThreshold": "Noise Threshold",
- "perlinNoise": "Perlin Noise",
- "noiseSettings": "Noise",
- "variations": "Variations",
- "variationAmount": "Variation Amount",
- "seedWeights": "Seed Weights",
- "faceRestoration": "Face Restoration",
- "restoreFaces": "Restore Faces",
- "type": "Type",
- "strength": "Strength",
- "upscaling": "Upscaling",
- "upscale": "Upscale",
- "upscaleImage": "Upscale Image",
+ "boundingBoxWidth": "Bounding Box Width",
+ "cancel": {
+ "cancel": "Cancel",
+ "immediate": "Cancel immediately",
+ "isScheduled": "Canceling",
+ "schedule": "Cancel after current iteration",
+ "setType": "Set cancel type"
+ },
+ "cfgScale": "CFG Scale",
+ "clipSkip": "CLIP Skip",
+ "closeViewer": "Close Viewer",
+ "codeformerFidelity": "Fidelity",
+ "coherenceMode": "Mode",
+ "coherencePassHeader": "Coherence Pass",
+ "coherenceSteps": "Steps",
+ "coherenceStrength": "Strength",
+ "compositingSettingsHeader": "Compositing Settings",
+ "controlNetControlMode": "Control Mode",
+ "copyImage": "Copy Image",
+ "copyImageToLink": "Copy Image To Link",
"denoisingStrength": "Denoising Strength",
- "scale": "Scale",
- "otherOptions": "Other Options",
- "seamlessTiling": "Seamless Tiling",
- "seamlessXAxis": "X Axis",
- "seamlessYAxis": "Y Axis",
+ "downloadImage": "Download Image",
+ "enableNoiseSettings": "Enable Noise Settings",
+ "faceRestoration": "Face Restoration",
+ "general": "General",
+ "height": "Height",
+ "hidePreview": "Hide Preview",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
+ "hSymmetryStep": "H Symmetry Step",
"imageFit": "Fit Initial Image To Output Size",
- "codeformerFidelity": "Fidelity",
- "compositingSettingsHeader": "Compositing Settings",
+ "images": "Images",
+ "imageToImage": "Image to Image",
+ "img2imgStrength": "Image To Image Strength",
+ "infillMethod": "Infill Method",
+ "infillScalingHeader": "Infill and Scaling",
+ "info": "Info",
+ "initialImage": "Initial Image",
+ "invoke": {
+ "addingImagesTo": "Adding images to",
+ "invoke": "Invoke",
+ "missingFieldTemplate": "Missing field template",
+ "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
+ "missingNodeTemplate": "Missing node template",
+ "noControlImageForControlNet": "ControlNet {{index}} has no control image",
+ "noInitialImageSelected": "No initial image selected",
+ "noModelForControlNet": "ControlNet {{index}} has no model selected.",
+ "noModelSelected": "No model selected",
+ "noNodesInGraph": "No nodes in graph",
+ "readyToInvoke": "Ready to Invoke",
+ "systemBusy": "System busy",
+ "systemDisconnected": "System disconnected",
+ "unableToInvoke": "Unable to Invoke"
+ },
"maskAdjustmentsHeader": "Mask Adjustments",
"maskBlur": "Blur",
"maskBlurMethod": "Blur Method",
- "coherencePassHeader": "Coherence Pass",
- "coherenceMode": "Mode",
- "coherenceSteps": "Steps",
- "coherenceStrength": "Strength",
- "seamLowThreshold": "Low",
- "seamHighThreshold": "High",
- "scaleBeforeProcessing": "Scale Before Processing",
- "scaledWidth": "Scaled W",
- "scaledHeight": "Scaled H",
- "infillMethod": "Infill Method",
- "tileSize": "Tile Size",
- "patchmatchDownScaleSize": "Downscale",
- "boundingBoxHeader": "Bounding Box",
- "seamCorrectionHeader": "Seam Correction",
- "infillScalingHeader": "Infill and Scaling",
- "img2imgStrength": "Image To Image Strength",
- "toggleLoopback": "Toggle Loopback",
- "symmetry": "Symmetry",
- "hSymmetryStep": "H Symmetry Step",
- "vSymmetryStep": "V Symmetry Step",
- "invoke": "Invoke",
- "cancel": {
- "immediate": "Cancel immediately",
- "schedule": "Cancel after current iteration",
- "isScheduled": "Canceling",
- "setType": "Set cancel type"
- },
- "positivePromptPlaceholder": "Positive Prompt",
"negativePromptPlaceholder": "Negative Prompt",
+ "noiseSettings": "Noise",
+ "noiseThreshold": "Noise Threshold",
+ "openInViewer": "Open In Viewer",
+ "otherOptions": "Other Options",
+ "patchmatchDownScaleSize": "Downscale",
+ "perlinNoise": "Perlin Noise",
+ "positivePromptPlaceholder": "Positive Prompt",
+ "randomizeSeed": "Randomize Seed",
+ "restoreFaces": "Restore Faces",
+ "scale": "Scale",
+ "scaleBeforeProcessing": "Scale Before Processing",
+ "scaledHeight": "Scaled H",
+ "scaledWidth": "Scaled W",
+ "scheduler": "Scheduler",
+ "seamCorrectionHeader": "Seam Correction",
+ "seamHighThreshold": "High",
+ "seamlessTiling": "Seamless Tiling",
+ "seamlessXAxis": "X Axis",
+ "seamlessYAxis": "Y Axis",
+ "seamLowThreshold": "Low",
+ "seed": "Seed",
+ "seedWeights": "Seed Weights",
"sendTo": "Send to",
"sendToImg2Img": "Send to Image to Image",
"sendToUnifiedCanvas": "Send To Unified Canvas",
- "copyImage": "Copy Image",
- "copyImageToLink": "Copy Image To Link",
- "downloadImage": "Download Image",
- "openInViewer": "Open In Viewer",
- "closeViewer": "Close Viewer",
+ "showOptionsPanel": "Show Options Panel",
+ "showPreview": "Show Preview",
+ "shuffle": "Shuffle Seed",
+ "steps": "Steps",
+ "strength": "Strength",
+ "symmetry": "Symmetry",
+ "tileSize": "Tile Size",
+ "toggleLoopback": "Toggle Loopback",
+ "type": "Type",
+ "upscale": "Upscale",
+ "upscaleImage": "Upscale Image",
+ "upscaling": "Upscaling",
+ "useAll": "Use All",
+ "useCpuNoise": "Use CPU Noise",
+ "useInitImg": "Use Initial Image",
"usePrompt": "Use Prompt",
"useSeed": "Use Seed",
- "useAll": "Use All",
- "useInitImg": "Use Initial Image",
- "info": "Info",
- "initialImage": "Initial Image",
- "showOptionsPanel": "Show Options Panel",
- "hidePreview": "Hide Preview",
- "showPreview": "Show Preview",
- "controlNetControlMode": "Control Mode",
- "clipSkip": "CLIP Skip",
- "aspectRatio": "Ratio"
+ "variationAmount": "Variation Amount",
+ "variations": "Variations",
+ "vSymmetryStep": "V Symmetry Step",
+ "width": "Width"
+ },
+ "prompt": {
+ "combinatorial": "Combinatorial Generation",
+ "dynamicPrompts": "Dynamic Prompts",
+ "enableDynamicPrompts": "Enable Dynamic Prompts",
+ "maxPrompts": "Max Prompts"
+ },
+ "sdxl": {
+ "cfgScale": "CFG Scale",
+ "concatPromptStyle": "Concatenate Prompt & Style",
+ "denoisingStrength": "Denoising Strength",
+ "loading": "Loading...",
+ "negAestheticScore": "Negative Aesthetic Score",
+ "negStylePrompt": "Negative Style Prompt",
+ "noModelsAvailable": "No models available",
+ "posAestheticScore": "Positive Aesthetic Score",
+ "posStylePrompt": "Positive Style Prompt",
+ "refiner": "Refiner",
+ "refinermodel": "Refiner Model",
+ "refinerStart": "Refiner Start",
+ "scheduler": "Scheduler",
+ "selectAModel": "Select a model",
+ "steps": "Steps",
+ "useRefiner": "Use Refiner"
},
"settings": {
- "models": "Models",
- "displayInProgress": "Display Progress Images",
- "saveSteps": "Save images every n steps",
- "confirmOnDelete": "Confirm On Delete",
- "displayHelpIcons": "Display Help Icons",
"alternateCanvasLayout": "Alternate Canvas Layout",
- "enableNodesEditor": "Enable Nodes Editor",
- "enableImageDebugging": "Enable Image Debugging",
- "useSlidersForAll": "Use Sliders For All Options",
- "showProgressInViewer": "Show Progress Images in Viewer",
"antialiasProgressImages": "Antialias Progress Images",
"autoChangeDimensions": "Update W/H To Model Defaults On Change",
+ "beta": "Beta",
+ "confirmOnDelete": "Confirm On Delete",
+ "consoleLogLevel": "Log Level",
+ "developer": "Developer",
+ "displayHelpIcons": "Display Help Icons",
+ "displayInProgress": "Display Progress Images",
+ "enableImageDebugging": "Enable Image Debugging",
+ "enableNodesEditor": "Enable Nodes Editor",
+ "experimental": "Experimental",
+ "favoriteSchedulers": "Favorite Schedulers",
+ "favoriteSchedulersPlaceholder": "No schedulers favorited",
+ "general": "General",
+ "generation": "Generation",
+ "models": "Models",
+ "resetComplete": "Web UI has been reset.",
"resetWebUI": "Reset Web UI",
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
- "resetComplete": "Web UI has been reset.",
- "consoleLogLevel": "Log Level",
+ "saveSteps": "Save images every n steps",
"shouldLogToConsole": "Console Logging",
- "developer": "Developer",
- "general": "General",
- "generation": "Generation",
- "ui": "User Interface",
- "favoriteSchedulers": "Favorite Schedulers",
- "favoriteSchedulersPlaceholder": "No schedulers favorited",
"showAdvancedOptions": "Show Advanced Options",
- "experimental": "Experimental",
- "beta": "Beta"
+ "showProgressInViewer": "Show Progress Images in Viewer",
+ "ui": "User Interface",
+ "useSlidersForAll": "Use Sliders For All Options"
},
"toast": {
- "serverError": "Server Error",
- "disconnected": "Disconnected from Server",
- "connected": "Connected to Server",
"canceled": "Processing Canceled",
- "tempFoldersEmptied": "Temp Folder Emptied",
- "uploadFailed": "Upload failed",
- "uploadFailedUnableToLoadDesc": "Unable to load file",
- "uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
+ "canvasMerged": "Canvas Merged",
+ "connected": "Connected to Server",
+ "disconnected": "Disconnected from Server",
"downloadImageStarted": "Image Download Started",
+ "faceRestoreFailed": "Face Restoration Failed",
"imageCopied": "Image Copied",
- "problemCopyingImage": "Unable to Copy Image",
"imageLinkCopied": "Image Link Copied",
- "problemCopyingImageLink": "Unable to Copy Image Link",
"imageNotLoaded": "No Image Loaded",
"imageNotLoadedDesc": "Could not find image",
"imageSavedToGallery": "Image Saved to Gallery",
- "canvasMerged": "Canvas Merged",
- "sentToImageToImage": "Sent To Image To Image",
- "sentToUnifiedCanvas": "Sent to Unified Canvas",
- "parameterSet": "Parameter set",
- "parameterNotSet": "Parameter not set",
- "parametersSet": "Parameters Set",
- "parametersNotSet": "Parameters Not Set",
- "parametersNotSetDesc": "No metadata found for this image.",
- "parametersFailed": "Problem loading parameters",
- "parametersFailedDesc": "Unable to load init image.",
- "seedSet": "Seed Set",
- "seedNotSet": "Seed Not Set",
- "seedNotSetDesc": "Could not find seed for this image.",
- "promptSet": "Prompt Set",
- "promptNotSet": "Prompt Not Set",
- "promptNotSetDesc": "Could not find prompt for this image.",
- "upscalingFailed": "Upscaling Failed",
- "faceRestoreFailed": "Face Restoration Failed",
- "metadataLoadFailed": "Failed to load metadata",
- "initialImageSet": "Initial Image Set",
"initialImageNotSet": "Initial Image Not Set",
"initialImageNotSetDesc": "Could not load initial image",
- "nodesSaved": "Nodes Saved",
+ "initialImageSet": "Initial Image Set",
+ "metadataLoadFailed": "Failed to load metadata",
+ "modelAdded": "Model Added: {{modelName}}",
+ "modelAddedSimple": "Model Added",
+ "modelAddFailed": "Model Add Failed",
+ "nodesBrokenConnections": "Cannot load. Some connections are broken.",
+ "nodesCleared": "Nodes Cleared",
+ "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
"nodesLoaded": "Nodes Loaded",
+ "nodesLoadedFailed": "Failed To Load Nodes",
"nodesNotValidGraph": "Not a valid InvokeAI Node Graph",
"nodesNotValidJSON": "Not a valid JSON",
- "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
+ "nodesSaved": "Nodes Saved",
"nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types",
- "nodesBrokenConnections": "Cannot load. Some connections are broken.",
- "nodesLoadedFailed": "Failed To Load Nodes",
- "nodesCleared": "Nodes Cleared"
+ "parameterNotSet": "Parameter not set",
+ "parameterSet": "Parameter set",
+ "parametersFailed": "Problem loading parameters",
+ "parametersFailedDesc": "Unable to load init image.",
+ "parametersNotSet": "Parameters Not Set",
+ "parametersNotSetDesc": "No metadata found for this image.",
+ "parametersSet": "Parameters Set",
+ "problemCopyingImage": "Unable to Copy Image",
+ "problemCopyingImageLink": "Unable to Copy Image Link",
+ "promptNotSet": "Prompt Not Set",
+ "promptNotSetDesc": "Could not find prompt for this image.",
+ "promptSet": "Prompt Set",
+ "seedNotSet": "Seed Not Set",
+ "seedNotSetDesc": "Could not find seed for this image.",
+ "seedSet": "Seed Set",
+ "sentToImageToImage": "Sent To Image To Image",
+ "sentToUnifiedCanvas": "Sent to Unified Canvas",
+ "serverError": "Server Error",
+ "tempFoldersEmptied": "Temp Folder Emptied",
+ "uploadFailed": "Upload failed",
+ "uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
+ "uploadFailedUnableToLoadDesc": "Unable to load file",
+ "upscalingFailed": "Upscaling Failed"
},
"tooltip": {
"feature": {
- "prompt": "This is the prompt field. Prompt includes generation objects and stylistic terms. You can add weight (token importance) in the prompt as well, but CLI commands and parameters will not work.",
- "gallery": "Gallery displays generations from the outputs folder as they're created. Settings are stored within files and accesed by context menu.",
- "other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer than usual txt2img.",
- "seed": "Seed value affects the initial noise from which the image is formed. You can use the already existing seeds from previous images. 'Noise Threshold' is used to mitigate artifacts at high CFG values (try the 0-10 range), and Perlin to add Perlin noise during generation: both serve to add variation to your outputs.",
- "variations": "Try a variation with a value between 0.1 and 1.0 to change the result for a given seed. Interesting variations of the seed are between 0.1 and 0.3.",
- "upscale": "Use ESRGAN to enlarge the image immediately after generation.",
- "faceCorrection": "Face correction with GFPGAN or Codeformer: the algorithm detects faces in the image and corrects any defects. High value will change the image more, resulting in more attractive faces. Codeformer with a higher fidelity preserves the original image at the expense of stronger face correction.",
- "imageToImage": "Image to Image loads any image as initial, which is then used to generate a new one along with the prompt. The higher the value, the more the result image will change. Values from 0.0 to 1.0 are possible, the recommended range is .25-.75",
"boundingBox": "The bounding box is the same as the Width and Height settings for Text to Image or Image to Image. Only the area in the box will be processed.",
+ "faceCorrection": "Face correction with GFPGAN or Codeformer: the algorithm detects faces in the image and corrects any defects. High value will change the image more, resulting in more attractive faces. Codeformer with a higher fidelity preserves the original image at the expense of stronger face correction.",
+      "gallery": "Gallery displays generations from the outputs folder as they're created. Settings are stored within files and accessed by context menu.",
+ "imageToImage": "Image to Image loads any image as initial, which is then used to generate a new one along with the prompt. The higher the value, the more the result image will change. Values from 0.0 to 1.0 are possible, the recommended range is .25-.75",
+ "infillAndScaling": "Manage infill methods (used on masked or erased areas of the canvas) and scaling (useful for small bounding box sizes).",
+ "other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer than usual txt2img.",
+ "prompt": "This is the prompt field. Prompt includes generation objects and stylistic terms. You can add weight (token importance) in the prompt as well, but CLI commands and parameters will not work.",
"seamCorrection": "Controls the handling of visible seams that occur between generated images on the canvas.",
- "infillAndScaling": "Manage infill methods (used on masked or erased areas of the canvas) and scaling (useful for small bounding box sizes)."
+ "seed": "Seed value affects the initial noise from which the image is formed. You can use the already existing seeds from previous images. 'Noise Threshold' is used to mitigate artifacts at high CFG values (try the 0-10 range), and Perlin to add Perlin noise during generation: both serve to add variation to your outputs.",
+ "upscale": "Use ESRGAN to enlarge the image immediately after generation.",
+ "variations": "Try a variation with a value between 0.1 and 1.0 to change the result for a given seed. Interesting variations of the seed are between 0.1 and 0.3."
}
},
+ "ui": {
+ "hideProgressImages": "Hide Progress Images",
+ "lockRatio": "Lock Ratio",
+ "showProgressImages": "Show Progress Images",
+ "swapSizes": "Swap Sizes"
+ },
"unifiedCanvas": {
- "layer": "Layer",
- "base": "Base",
- "mask": "Mask",
- "maskingOptions": "Masking Options",
- "enableMask": "Enable Mask",
- "preserveMaskedArea": "Preserve Masked Area",
- "clearMask": "Clear Mask",
- "brush": "Brush",
- "eraser": "Eraser",
- "fillBoundingBox": "Fill Bounding Box",
- "eraseBoundingBox": "Erase Bounding Box",
- "colorPicker": "Color Picker",
- "brushOptions": "Brush Options",
- "brushSize": "Size",
- "move": "Move",
- "resetView": "Reset View",
- "mergeVisible": "Merge Visible",
- "saveToGallery": "Save To Gallery",
- "copyToClipboard": "Copy to Clipboard",
- "downloadAsImage": "Download As Image",
- "undo": "Undo",
- "redo": "Redo",
- "clearCanvas": "Clear Canvas",
- "canvasSettings": "Canvas Settings",
- "showIntermediates": "Show Intermediates",
- "showGrid": "Show Grid",
- "snapToGrid": "Snap to Grid",
- "darkenOutsideSelection": "Darken Outside Selection",
- "autoSaveToGallery": "Auto Save to Gallery",
- "saveBoxRegionOnly": "Save Box Region Only",
- "limitStrokesToBox": "Limit Strokes to Box",
- "showCanvasDebugInfo": "Show Additional Canvas Info",
- "clearCanvasHistory": "Clear Canvas History",
- "clearHistory": "Clear History",
- "clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",
- "clearCanvasHistoryConfirm": "Are you sure you want to clear the canvas history?",
- "emptyTempImageFolder": "Empty Temp Image Folder",
- "emptyFolder": "Empty Folder",
- "emptyTempImagesFolderMessage": "Emptying the temp image folder also fully resets the Unified Canvas. This includes all undo/redo history, images in the staging area, and the canvas base layer.",
- "emptyTempImagesFolderConfirm": "Are you sure you want to empty the temp folder?",
- "activeLayer": "Active Layer",
- "canvasScale": "Canvas Scale",
- "boundingBox": "Bounding Box",
- "scaledBoundingBox": "Scaled Bounding Box",
- "boundingBoxPosition": "Bounding Box Position",
- "canvasDimensions": "Canvas Dimensions",
- "canvasPosition": "Canvas Position",
- "cursorPosition": "Cursor Position",
- "previous": "Previous",
- "next": "Next",
"accept": "Accept",
- "showHide": "Show/Hide",
- "discardAll": "Discard All",
+ "activeLayer": "Active Layer",
+ "antialiasing": "Antialiasing",
+ "autoSaveToGallery": "Auto Save to Gallery",
+ "base": "Base",
"betaClear": "Clear",
"betaDarkenOutside": "Darken Outside",
"betaLimitToBox": "Limit To Box",
"betaPreserveMasked": "Preserve Masked",
- "antialiasing": "Antialiasing"
- },
- "ui": {
- "showProgressImages": "Show Progress Images",
- "hideProgressImages": "Hide Progress Images",
- "swapSizes": "Swap Sizes",
- "lockRatio": "Lock Ratio"
- },
- "nodes": {
- "reloadNodeTemplates": "Reload Node Templates",
- "downloadWorkflow": "Download Workflow JSON",
- "loadWorkflow": "Load Workflow",
- "resetWorkflow": "Reset Workflow",
- "resetWorkflowDesc": "Are you sure you want to reset this workflow?",
- "resetWorkflowDesc2": "Resetting the workflow will clear all nodes, edges and workflow details.",
- "zoomInNodes": "Zoom In",
- "zoomOutNodes": "Zoom Out",
- "fitViewportNodes": "Fit View",
- "hideGraphNodes": "Hide Graph Overlay",
- "showGraphNodes": "Show Graph Overlay",
- "hideLegendNodes": "Hide Field Type Legend",
- "showLegendNodes": "Show Field Type Legend",
- "hideMinimapnodes": "Hide MiniMap",
- "showMinimapnodes": "Show MiniMap"
+ "boundingBox": "Bounding Box",
+ "boundingBoxPosition": "Bounding Box Position",
+ "brush": "Brush",
+ "brushOptions": "Brush Options",
+ "brushSize": "Size",
+ "canvasDimensions": "Canvas Dimensions",
+ "canvasPosition": "Canvas Position",
+ "canvasScale": "Canvas Scale",
+ "canvasSettings": "Canvas Settings",
+ "clearCanvas": "Clear Canvas",
+ "clearCanvasHistory": "Clear Canvas History",
+ "clearCanvasHistoryConfirm": "Are you sure you want to clear the canvas history?",
+ "clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",
+ "clearHistory": "Clear History",
+ "clearMask": "Clear Mask",
+ "colorPicker": "Color Picker",
+ "copyToClipboard": "Copy to Clipboard",
+ "cursorPosition": "Cursor Position",
+ "darkenOutsideSelection": "Darken Outside Selection",
+ "discardAll": "Discard All",
+ "downloadAsImage": "Download As Image",
+ "emptyFolder": "Empty Folder",
+ "emptyTempImageFolder": "Empty Temp Image Folder",
+ "emptyTempImagesFolderConfirm": "Are you sure you want to empty the temp folder?",
+ "emptyTempImagesFolderMessage": "Emptying the temp image folder also fully resets the Unified Canvas. This includes all undo/redo history, images in the staging area, and the canvas base layer.",
+ "enableMask": "Enable Mask",
+ "eraseBoundingBox": "Erase Bounding Box",
+ "eraser": "Eraser",
+ "fillBoundingBox": "Fill Bounding Box",
+ "layer": "Layer",
+ "limitStrokesToBox": "Limit Strokes to Box",
+ "mask": "Mask",
+ "maskingOptions": "Masking Options",
+ "mergeVisible": "Merge Visible",
+ "move": "Move",
+ "next": "Next",
+ "preserveMaskedArea": "Preserve Masked Area",
+ "previous": "Previous",
+ "redo": "Redo",
+ "resetView": "Reset View",
+ "saveBoxRegionOnly": "Save Box Region Only",
+ "saveToGallery": "Save To Gallery",
+ "scaledBoundingBox": "Scaled Bounding Box",
+ "showCanvasDebugInfo": "Show Additional Canvas Info",
+ "showGrid": "Show Grid",
+ "showHide": "Show/Hide",
+ "showIntermediates": "Show Intermediates",
+ "snapToGrid": "Snap to Grid",
+ "undo": "Undo"
}
}
diff --git a/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
index 8eaabeeedf..dd21afe459 100644
--- a/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
+++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToInvoke.ts
@@ -6,6 +6,7 @@ import { isInvocationNode } from 'features/nodes/types/types';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { forEach, map } from 'lodash-es';
import { getConnectedEdges } from 'reactflow';
+import i18n from 'i18next';
const selector = createSelector(
[stateSelector, activeTabNameSelector],
@@ -19,22 +20,22 @@ const selector = createSelector(
// Cannot generate if already processing an image
if (isProcessing) {
- reasons.push('System busy');
+ reasons.push(i18n.t('parameters.invoke.systemBusy'));
}
// Cannot generate if not connected
if (!isConnected) {
- reasons.push('System disconnected');
+ reasons.push(i18n.t('parameters.invoke.systemDisconnected'));
}
if (activeTabName === 'img2img' && !initialImage) {
- reasons.push('No initial image selected');
+ reasons.push(i18n.t('parameters.invoke.noInitialImageSelected'));
}
if (activeTabName === 'nodes') {
if (nodes.shouldValidateGraph) {
if (!nodes.nodes.length) {
- reasons.push('No nodes in graph');
+ reasons.push(i18n.t('parameters.invoke.noNodesInGraph'));
}
nodes.nodes.forEach((node) => {
@@ -46,7 +47,7 @@ const selector = createSelector(
if (!nodeTemplate) {
// Node type not found
- reasons.push('Missing node template');
+ reasons.push(i18n.t('parameters.invoke.missingNodeTemplate'));
return;
}
@@ -60,7 +61,7 @@ const selector = createSelector(
);
if (!fieldTemplate) {
- reasons.push('Missing field template');
+ reasons.push(i18n.t('parameters.invoke.missingFieldTemplate'));
return;
}
@@ -70,9 +71,10 @@ const selector = createSelector(
!hasConnection
) {
reasons.push(
- `${node.data.label || nodeTemplate.title} -> ${
- field.label || fieldTemplate.title
- } missing input`
+ i18n.t('parameters.invoke.missingInputForField', {
+ nodeLabel: node.data.label || nodeTemplate.title,
+ fieldLabel: field.label || fieldTemplate.title,
+ })
);
return;
}
@@ -81,7 +83,7 @@ const selector = createSelector(
}
} else {
if (!model) {
- reasons.push('No model selected');
+ reasons.push(i18n.t('parameters.invoke.noModelSelected'));
}
if (state.controlNet.isEnabled) {
@@ -90,7 +92,9 @@ const selector = createSelector(
return;
}
if (!controlNet.model) {
- reasons.push(`ControlNet ${i + 1} has no model selected.`);
+ reasons.push(
+ i18n.t('parameters.invoke.noModelForControlNet', { index: i + 1 })
+ );
}
if (
@@ -98,7 +102,11 @@ const selector = createSelector(
(!controlNet.processedControlImage &&
controlNet.processorType !== 'none')
) {
- reasons.push(`ControlNet ${i + 1} has no control image`);
+ reasons.push(
+ i18n.t('parameters.invoke.noControlImageForControlNet', {
+ index: i + 1,
+ })
+ );
}
});
}
diff --git a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
index 2443fa6081..6bdf434d52 100644
--- a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
+++ b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
@@ -21,6 +21,7 @@ import {
useRemoveImagesFromBoardMutation,
} from 'services/api/endpoints/images';
import { changeBoardReset, isModalOpenChanged } from '../store/slice';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
[stateSelector],
@@ -42,10 +43,11 @@ const ChangeBoardModal = () => {
const { imagesToChange, isModalOpen } = useAppSelector(selector);
const [addImagesToBoard] = useAddImagesToBoardMutation();
const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
+ const { t } = useTranslation();
const data = useMemo(() => {
const data: { label: string; value: string }[] = [
- { label: 'Uncategorized', value: 'none' },
+ { label: t('boards.uncategorized'), value: 'none' },
];
(boards ?? []).forEach((board) =>
data.push({
@@ -55,7 +57,7 @@ const ChangeBoardModal = () => {
);
return data;
- }, [boards]);
+ }, [boards, t]);
const handleClose = useCallback(() => {
dispatch(changeBoardReset());
@@ -97,7 +99,7 @@ const ChangeBoardModal = () => {
- Change Board
+ {t('boards.changeBoard')}
@@ -107,7 +109,9 @@ const ChangeBoardModal = () => {
{`${imagesToChange.length > 1 ? 's' : ''}`} to board:
setSelectedBoard(v)}
value={selectedBoard}
@@ -117,10 +121,10 @@ const ChangeBoardModal = () => {
- Cancel
+ {t('boards.cancel')}
- Move
+ {t('boards.move')}
diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx b/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx
index 1f70542494..d40a234495 100644
--- a/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx
+++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNet.tsx
@@ -28,6 +28,7 @@ import ParamControlNetBeginEnd from './parameters/ParamControlNetBeginEnd';
import ParamControlNetControlMode from './parameters/ParamControlNetControlMode';
import ParamControlNetProcessorSelect from './parameters/ParamControlNetProcessorSelect';
import ParamControlNetResizeMode from './parameters/ParamControlNetResizeMode';
+import { useTranslation } from 'react-i18next';
type ControlNetProps = {
controlNet: ControlNetConfig;
@@ -37,6 +38,7 @@ const ControlNet = (props: ControlNetProps) => {
const { controlNet } = props;
const { controlNetId } = controlNet;
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const activeTabName = useAppSelector(activeTabNameSelector);
@@ -95,8 +97,8 @@ const ControlNet = (props: ControlNetProps) => {
>
@@ -117,23 +119,31 @@ const ControlNet = (props: ControlNetProps) => {
)}
}
/>
}
/>
{
} = controlNet;
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const { pendingControlImages, autoAddBoardId } = useAppSelector(selector);
const activeTabName = useAppSelector(activeTabNameSelector);
@@ -208,18 +210,18 @@ const ControlNetImagePreview = ({ isSmall, controlNet }: Props) => {
: undefined}
- tooltip="Reset Control Image"
+ tooltip={t('controlnet.resetControlImage')}
/>
: undefined}
- tooltip="Save Control Image"
+ tooltip={t('controlnet.saveControlImage')}
styleOverrides={{ marginTop: 6 }}
/>
: undefined}
- tooltip="Set Control Image Dimensions To W/H"
+ tooltip={t('controlnet.setControlImageDimensions')}
styleOverrides={{ marginTop: 12 }}
/>
>
diff --git a/invokeai/frontend/web/src/features/controlNet/components/ParamControlNetShouldAutoConfig.tsx b/invokeai/frontend/web/src/features/controlNet/components/ParamControlNetShouldAutoConfig.tsx
index 0e044d4575..76f1cb9dfe 100644
--- a/invokeai/frontend/web/src/features/controlNet/components/ParamControlNetShouldAutoConfig.tsx
+++ b/invokeai/frontend/web/src/features/controlNet/components/ParamControlNetShouldAutoConfig.tsx
@@ -6,6 +6,7 @@ import {
} from 'features/controlNet/store/controlNetSlice';
import { selectIsBusy } from 'features/system/store/systemSelectors';
import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
type Props = {
controlNet: ControlNetConfig;
@@ -15,6 +16,7 @@ const ParamControlNetShouldAutoConfig = (props: Props) => {
const { controlNetId, isEnabled, shouldAutoConfig } = props.controlNet;
const dispatch = useAppDispatch();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleShouldAutoConfigChanged = useCallback(() => {
dispatch(controlNetAutoConfigToggled({ controlNetId }));
@@ -22,8 +24,8 @@ const ParamControlNetShouldAutoConfig = (props: Props) => {
return (
{
const { controlNet } = props;
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const handleImportImageFromCanvas = useCallback(() => {
dispatch(canvasImageToControlNet({ controlNet }));
@@ -36,15 +38,15 @@ const ControlNetCanvasImageImports = (
}
- tooltip="Import Image From Canvas"
- aria-label="Import Image From Canvas"
+ tooltip={t('controlnet.importImageFromCanvas')}
+ aria-label={t('controlnet.importImageFromCanvas')}
onClick={handleImportImageFromCanvas}
/>
}
- tooltip="Import Mask From Canvas"
- aria-label="Import Mask From Canvas"
+ tooltip={t('controlnet.importMaskFromCanvas')}
+ aria-label={t('controlnet.importMaskFromCanvas')}
onClick={handleImportMaskFromCanvas}
/>
diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx
index 1219239e5d..f34c863cff 100644
--- a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx
+++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetBeginEnd.tsx
@@ -16,6 +16,7 @@ import {
controlNetEndStepPctChanged,
} from 'features/controlNet/store/controlNetSlice';
import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
type Props = {
controlNet: ControlNetConfig;
@@ -27,6 +28,7 @@ const ParamControlNetBeginEnd = (props: Props) => {
const { beginStepPct, endStepPct, isEnabled, controlNetId } =
props.controlNet;
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const handleStepPctChanged = useCallback(
(v: number[]) => {
@@ -48,10 +50,10 @@ const ParamControlNetBeginEnd = (props: Props) => {
return (
- Begin / End Step Percentage
+ {t('controlnet.beginEndStepPercent')}
{
@@ -34,7 +36,7 @@ export default function ParamControlNetControlMode(
return (
{
const isBusy = useAppSelector(selectIsBusy);
const { mainModel } = useAppSelector(selector);
+ const { t } = useTranslation();
const { data: controlNetModels } = useGetControlNetModelsQuery();
@@ -58,13 +60,13 @@ const ParamControlNetModel = (props: ParamControlNetModelProps) => {
group: MODEL_TYPE_MAP[model.base_model],
disabled,
tooltip: disabled
- ? `Incompatible base model: ${model.base_model}`
+ ? `${t('controlnet.incompatibleBaseModel')} ${model.base_model}`
: undefined,
});
});
return data;
- }, [controlNetModels, mainModel?.base_model]);
+ }, [controlNetModels, mainModel?.base_model, t]);
// grab the full model entity from the RTK Query cache
const selectedModel = useMemo(
@@ -105,7 +107,7 @@ const ParamControlNetModel = (props: ParamControlNetModelProps) => {
error={
!selectedModel || mainModel?.base_model !== selectedModel.base_model
}
- placeholder="Select a model"
+ placeholder={t('controlnet.selectModel')}
value={selectedModel?.id ?? null}
onChange={handleModelChanged}
disabled={isBusy || !isEnabled}
diff --git a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx
index 190b1bc012..a357547403 100644
--- a/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx
+++ b/invokeai/frontend/web/src/features/controlNet/components/parameters/ParamControlNetProcessorSelect.tsx
@@ -15,6 +15,7 @@ import {
controlNetProcessorTypeChanged,
} from '../../store/controlNetSlice';
import { ControlNetProcessorType } from '../../store/types';
+import { useTranslation } from 'react-i18next';
type ParamControlNetProcessorSelectProps = {
controlNet: ControlNetConfig;
@@ -57,6 +58,7 @@ const ParamControlNetProcessorSelect = (
const { controlNetId, isEnabled, processorNode } = props.controlNet;
const isBusy = useAppSelector(selectIsBusy);
const controlNetProcessors = useAppSelector(selector);
+ const { t } = useTranslation();
const handleProcessorTypeChanged = useCallback(
(v: string | null) => {
@@ -72,7 +74,7 @@ const ParamControlNetProcessorSelect = (
return (
{
@@ -33,7 +35,7 @@ export default function ParamControlNetResizeMode(
return (
{
const { weight, isEnabled, controlNetId } = props.controlNet;
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const handleWeightChanged = useCallback(
(weight: number) => {
dispatch(controlNetWeightChanged({ controlNetId, weight }));
@@ -23,7 +25,7 @@ const ParamControlNetWeight = (props: ParamControlNetWeightProps) => {
return (
{
const { low_threshold, high_threshold } = processorNode;
const isBusy = useAppSelector(selectIsBusy);
const processorChanged = useProcessorNodeChanged();
+ const { t } = useTranslation();
const handleLowThresholdChanged = useCallback(
(v: number) => {
@@ -52,7 +54,7 @@ const CannyProcessor = (props: CannyProcessorProps) => {
{
/>
{
const { image_resolution, detect_resolution, w, h, f } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -90,7 +92,7 @@ const ContentShuffleProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
} = props;
const isBusy = useAppSelector(selectIsBusy);
const processorChanged = useProcessorNodeChanged();
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -62,7 +64,7 @@ const HedPreprocessor = (props: HedProcessorProps) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -51,7 +53,7 @@ const LineartAnimeProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution, coarse } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -59,7 +61,7 @@ const LineartProcessor = (props: LineartProcessorProps) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
const { max_faces, min_confidence } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleMaxFacesChanged = useCallback(
(v: number) => {
@@ -47,7 +49,7 @@ const MediapipeFaceProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
const { a_mult, bg_th } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleAMultChanged = useCallback(
(v: number) => {
@@ -47,7 +49,7 @@ const MidasDepthProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution, thr_d, thr_v } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -73,7 +75,7 @@ const MlsdImageProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -51,7 +53,7 @@ const NormalBaeProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution, hand_and_face } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -59,7 +61,7 @@ const OpenposeProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
{
const { image_resolution, detect_resolution, scribble, safe } = processorNode;
const processorChanged = useProcessorNodeChanged();
const isBusy = useAppSelector(selectIsBusy);
+ const { t } = useTranslation();
const handleDetectResolutionChanged = useCallback(
(v: number) => {
@@ -66,7 +68,7 @@ const PidiProcessor = (props: Props) => {
return (
{
isDisabled={isBusy || !isEnabled}
/>
{
isDisabled={isBusy || !isEnabled}
/>
;
-
/**
* A dict of ControlNet processors, including:
* - type
@@ -25,16 +25,24 @@ type ControlNetProcessorsDict = Record<
export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
none: {
type: 'none',
- label: 'none',
- description: '',
+ get label() {
+ return i18n.t('controlnet.none');
+ },
+ get description() {
+ return i18n.t('controlnet.noneDescription');
+ },
default: {
type: 'none',
},
},
canny_image_processor: {
type: 'canny_image_processor',
- label: 'Canny',
- description: '',
+ get label() {
+ return i18n.t('controlnet.canny');
+ },
+ get description() {
+ return i18n.t('controlnet.cannyDescription');
+ },
default: {
id: 'canny_image_processor',
type: 'canny_image_processor',
@@ -44,8 +52,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
content_shuffle_image_processor: {
type: 'content_shuffle_image_processor',
- label: 'Content Shuffle',
- description: '',
+ get label() {
+ return i18n.t('controlnet.contentShuffle');
+ },
+ get description() {
+ return i18n.t('controlnet.contentShuffleDescription');
+ },
default: {
id: 'content_shuffle_image_processor',
type: 'content_shuffle_image_processor',
@@ -58,8 +70,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
hed_image_processor: {
type: 'hed_image_processor',
- label: 'HED',
- description: '',
+ get label() {
+ return i18n.t('controlnet.hed');
+ },
+ get description() {
+ return i18n.t('controlnet.hedDescription');
+ },
default: {
id: 'hed_image_processor',
type: 'hed_image_processor',
@@ -70,8 +86,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
lineart_anime_image_processor: {
type: 'lineart_anime_image_processor',
- label: 'Lineart Anime',
- description: '',
+ get label() {
+ return i18n.t('controlnet.lineartAnime');
+ },
+ get description() {
+ return i18n.t('controlnet.lineartAnimeDescription');
+ },
default: {
id: 'lineart_anime_image_processor',
type: 'lineart_anime_image_processor',
@@ -81,8 +101,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
lineart_image_processor: {
type: 'lineart_image_processor',
- label: 'Lineart',
- description: '',
+ get label() {
+ return i18n.t('controlnet.lineart');
+ },
+ get description() {
+ return i18n.t('controlnet.lineartDescription');
+ },
default: {
id: 'lineart_image_processor',
type: 'lineart_image_processor',
@@ -93,8 +117,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
mediapipe_face_processor: {
type: 'mediapipe_face_processor',
- label: 'Mediapipe Face',
- description: '',
+ get label() {
+ return i18n.t('controlnet.mediapipeFace');
+ },
+ get description() {
+ return i18n.t('controlnet.mediapipeFaceDescription');
+ },
default: {
id: 'mediapipe_face_processor',
type: 'mediapipe_face_processor',
@@ -104,8 +132,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
midas_depth_image_processor: {
type: 'midas_depth_image_processor',
- label: 'Depth (Midas)',
- description: '',
+ get label() {
+ return i18n.t('controlnet.depthMidas');
+ },
+ get description() {
+ return i18n.t('controlnet.depthMidasDescription');
+ },
default: {
id: 'midas_depth_image_processor',
type: 'midas_depth_image_processor',
@@ -115,8 +147,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
mlsd_image_processor: {
type: 'mlsd_image_processor',
- label: 'M-LSD',
- description: '',
+ get label() {
+ return i18n.t('controlnet.mlsd');
+ },
+ get description() {
+ return i18n.t('controlnet.mlsdDescription');
+ },
default: {
id: 'mlsd_image_processor',
type: 'mlsd_image_processor',
@@ -128,8 +164,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
normalbae_image_processor: {
type: 'normalbae_image_processor',
- label: 'Normal BAE',
- description: '',
+ get label() {
+ return i18n.t('controlnet.normalBae');
+ },
+ get description() {
+ return i18n.t('controlnet.normalBaeDescription');
+ },
default: {
id: 'normalbae_image_processor',
type: 'normalbae_image_processor',
@@ -139,8 +179,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
openpose_image_processor: {
type: 'openpose_image_processor',
- label: 'Openpose',
- description: '',
+ get label() {
+ return i18n.t('controlnet.openPose');
+ },
+ get description() {
+ return i18n.t('controlnet.openPoseDescription');
+ },
default: {
id: 'openpose_image_processor',
type: 'openpose_image_processor',
@@ -151,8 +195,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
pidi_image_processor: {
type: 'pidi_image_processor',
- label: 'PIDI',
- description: '',
+ get label() {
+ return i18n.t('controlnet.pidi');
+ },
+ get description() {
+ return i18n.t('controlnet.pidiDescription');
+ },
default: {
id: 'pidi_image_processor',
type: 'pidi_image_processor',
@@ -164,8 +212,12 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
},
zoe_depth_image_processor: {
type: 'zoe_depth_image_processor',
- label: 'Depth (Zoe)',
- description: '',
+ get label() {
+ return i18n.t('controlnet.depthZoe');
+ },
+ get description() {
+ return i18n.t('controlnet.depthZoeDescription');
+ },
default: {
id: 'zoe_depth_image_processor',
type: 'zoe_depth_image_processor',
@@ -186,4 +238,6 @@ export const CONTROLNET_MODEL_DEFAULT_PROCESSORS: {
shuffle: 'content_shuffle_image_processor',
openpose: 'openpose_image_processor',
mediapipe: 'mediapipe_face_processor',
+ pidi: 'pidi_image_processor',
+ zoe: 'zoe_depth_image_processor',
};
diff --git a/invokeai/frontend/web/src/features/deleteImageModal/components/ImageUsageMessage.tsx b/invokeai/frontend/web/src/features/deleteImageModal/components/ImageUsageMessage.tsx
index 3ed7f3f05f..de1782b439 100644
--- a/invokeai/frontend/web/src/features/deleteImageModal/components/ImageUsageMessage.tsx
+++ b/invokeai/frontend/web/src/features/deleteImageModal/components/ImageUsageMessage.tsx
@@ -2,16 +2,19 @@ import { ListItem, Text, UnorderedList } from '@chakra-ui/react';
import { some } from 'lodash-es';
import { memo } from 'react';
import { ImageUsage } from '../store/types';
+import { useTranslation } from 'react-i18next';
+
type Props = {
imageUsage?: ImageUsage;
topMessage?: string;
bottomMessage?: string;
};
const ImageUsageMessage = (props: Props) => {
+ const { t } = useTranslation();
const {
imageUsage,
- topMessage = 'This image is currently in use in the following features:',
- bottomMessage = 'If you delete this image, those features will immediately be reset.',
+ topMessage = t('gallery.currentlyInUse'),
+ bottomMessage = t('gallery.featuresWillReset'),
} = props;
if (!imageUsage) {
@@ -26,10 +29,18 @@ const ImageUsageMessage = (props: Props) => {
<>
{topMessage}
- {imageUsage.isInitialImage && Image to Image}
- {imageUsage.isCanvasImage && Unified Canvas}
- {imageUsage.isControlNetImage && ControlNet}
- {imageUsage.isNodesImage && Node Editor}
+ {imageUsage.isInitialImage && (
+ {t('common.img2img')}
+ )}
+ {imageUsage.isCanvasImage && (
+ {t('common.unifiedCanvas')}
+ )}
+ {imageUsage.isControlNetImage && (
+ {t('common.controlNet')}
+ )}
+ {imageUsage.isNodesImage && (
+ {t('common.nodeEditor')}
+ )}
{bottomMessage}
>
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx
index 70aadcd4ce..b9fd655a43 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx
@@ -9,6 +9,7 @@ import { useFeatureStatus } from '../../system/hooks/useFeatureStatus';
import ParamDynamicPromptsCombinatorial from './ParamDynamicPromptsCombinatorial';
import ParamDynamicPromptsToggle from './ParamDynamicPromptsEnabled';
import ParamDynamicPromptsMaxPrompts from './ParamDynamicPromptsMaxPrompts';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -22,6 +23,7 @@ const selector = createSelector(
const ParamDynamicPromptsCollapse = () => {
const { activeLabel } = useAppSelector(selector);
+ const { t } = useTranslation();
const isDynamicPromptingEnabled =
useFeatureStatus('dynamicPrompting').isFeatureEnabled;
@@ -31,7 +33,7 @@ const ParamDynamicPromptsCollapse = () => {
}
return (
-
+
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx
index c028a5d55c..406dc8e216 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx
@@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAISwitch from 'common/components/IAISwitch';
import { memo, useCallback } from 'react';
import { combinatorialToggled } from '../store/dynamicPromptsSlice';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -19,6 +20,7 @@ const selector = createSelector(
const ParamDynamicPromptsCombinatorial = () => {
const { combinatorial, isDisabled } = useAppSelector(selector);
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const handleChange = useCallback(() => {
dispatch(combinatorialToggled());
@@ -27,7 +29,7 @@ const ParamDynamicPromptsCombinatorial = () => {
return (
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsEnabled.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsEnabled.tsx
index 1b31147937..a1d16b5361 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsEnabled.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsEnabled.tsx
@@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAISwitch from 'common/components/IAISwitch';
import { memo, useCallback } from 'react';
import { isEnabledToggled } from '../store/dynamicPromptsSlice';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -19,6 +20,7 @@ const selector = createSelector(
const ParamDynamicPromptsToggle = () => {
const dispatch = useAppDispatch();
const { isEnabled } = useAppSelector(selector);
+ const { t } = useTranslation();
const handleToggleIsEnabled = useCallback(() => {
dispatch(isEnabledToggled());
@@ -26,7 +28,7 @@ const ParamDynamicPromptsToggle = () => {
return (
diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
index f374f1cb15..158fab91d9 100644
--- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
+++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx
@@ -8,6 +8,7 @@ import {
maxPromptsChanged,
maxPromptsReset,
} from '../store/dynamicPromptsSlice';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -31,6 +32,7 @@ const ParamDynamicPromptsMaxPrompts = () => {
const { maxPrompts, min, sliderMax, inputMax, isDisabled } =
useAppSelector(selector);
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const handleChange = useCallback(
(v: number) => {
@@ -45,7 +47,7 @@ const ParamDynamicPromptsMaxPrompts = () => {
return (
void;
@@ -8,11 +9,12 @@ type Props = {
const AddEmbeddingButton = (props: Props) => {
const { onClick } = props;
+ const { t } = useTranslation();
return (
}
sx={{
p: 2,
diff --git a/invokeai/frontend/web/src/features/embedding/components/ParamEmbeddingPopover.tsx b/invokeai/frontend/web/src/features/embedding/components/ParamEmbeddingPopover.tsx
index 93daaf946f..164fd01a1f 100644
--- a/invokeai/frontend/web/src/features/embedding/components/ParamEmbeddingPopover.tsx
+++ b/invokeai/frontend/web/src/features/embedding/components/ParamEmbeddingPopover.tsx
@@ -16,6 +16,7 @@ import { forEach } from 'lodash-es';
import { PropsWithChildren, memo, useCallback, useMemo, useRef } from 'react';
import { useGetTextualInversionModelsQuery } from 'services/api/endpoints/models';
import { PARAMETERS_PANEL_WIDTH } from 'theme/util/constants';
+import { useTranslation } from 'react-i18next';
type Props = PropsWithChildren & {
onSelect: (v: string) => void;
@@ -27,6 +28,7 @@ const ParamEmbeddingPopover = (props: Props) => {
const { onSelect, isOpen, onClose, children } = props;
const { data: embeddingQueryData } = useGetTextualInversionModelsQuery();
const inputRef = useRef(null);
+ const { t } = useTranslation();
const currentMainModel = useAppSelector(
(state: RootState) => state.generation.model
@@ -52,7 +54,7 @@ const ParamEmbeddingPopover = (props: Props) => {
group: MODEL_TYPE_MAP[embedding.base_model],
disabled,
tooltip: disabled
- ? `Incompatible base model: ${embedding.base_model}`
+ ? `${t('embedding.incompatibleModel')} ${embedding.base_model}`
: undefined,
});
});
@@ -63,7 +65,7 @@ const ParamEmbeddingPopover = (props: Props) => {
);
return data.sort((a, b) => (a.disabled && !b.disabled ? 1 : -1));
- }, [embeddingQueryData, currentMainModel?.base_model]);
+ }, [embeddingQueryData, currentMainModel?.base_model, t]);
const handleChange = useCallback(
(v: string | null) => {
@@ -118,10 +120,10 @@ const ParamEmbeddingPopover = (props: Props) => {
{
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const { autoAddBoardId, autoAssignBoardOnClick, isProcessing } =
useAppSelector(selector);
const inputRef = useRef(null);
@@ -63,13 +65,13 @@ const BoardAutoAddSelect = () => {
return (
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
index c711ad892b..e5eb92028d 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
@@ -16,6 +16,7 @@ import { menuListMotionProps } from 'theme/components/menu';
import GalleryBoardContextMenuItems from './GalleryBoardContextMenuItems';
import NoBoardContextMenuItems from './NoBoardContextMenuItems';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
+import { useTranslation } from 'react-i18next';
type Props = {
board?: BoardDTO;
@@ -59,6 +60,8 @@ const BoardContextMenu = ({
e.preventDefault();
}, []);
+ const { t } = useTranslation();
+
return (
menuProps={{ size: 'sm', isLazy: true }}
@@ -78,7 +81,7 @@ const BoardContextMenu = ({
isDisabled={isAutoAdd || isProcessing || autoAssignBoardOnClick}
onClick={handleSetAutoAdd}
>
- Auto-add to this Board
+ {t('boards.menuItemAutoAdd')}
{!board && }
{board && (
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
index ebd08e94d5..96739f4c84 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx
@@ -2,22 +2,22 @@ import IAIIconButton from 'common/components/IAIIconButton';
import { memo, useCallback } from 'react';
import { FaPlus } from 'react-icons/fa';
import { useCreateBoardMutation } from 'services/api/endpoints/boards';
-
-const DEFAULT_BOARD_NAME = 'My Board';
+import { useTranslation } from 'react-i18next';
const AddBoardButton = () => {
+ const { t } = useTranslation();
const [createBoard, { isLoading }] = useCreateBoardMutation();
-
+ const DEFAULT_BOARD_NAME = t('boards.myBoard');
const handleCreateBoard = useCallback(() => {
createBoard(DEFAULT_BOARD_NAME);
- }, [createBoard]);
+ }, [createBoard, DEFAULT_BOARD_NAME]);
return (
}
isLoading={isLoading}
- tooltip="Add Board"
- aria-label="Add Board"
+ tooltip={t('boards.addBoard')}
+ aria-label={t('boards.addBoard')}
onClick={handleCreateBoard}
size="sm"
/>
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsSearch.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsSearch.tsx
index d7db96a938..2d2a85597c 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsSearch.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsSearch.tsx
@@ -18,6 +18,7 @@ import {
useEffect,
useRef,
} from 'react';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
[stateSelector],
@@ -32,6 +33,7 @@ const BoardsSearch = () => {
const dispatch = useAppDispatch();
const { boardSearchText } = useAppSelector(selector);
const inputRef = useRef(null);
+ const { t } = useTranslation();
const handleBoardSearch = useCallback(
(searchTerm: string) => {
@@ -73,7 +75,7 @@ const BoardsSearch = () => {
{
onClick={clearBoardSearch}
size="xs"
variant="ghost"
- aria-label="Clear Search"
+ aria-label={t('boards.clearSearch')}
opacity={0.5}
icon={}
/>
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
index f09cf131e2..2c54f06cec 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/DeleteBoardModal.tsx
@@ -132,8 +132,8 @@ const DeleteBoardModal = (props: Props) => {
) : (
)}
Deleted boards cannot be restored.
diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImagePreview.tsx
index 2576c8e9e3..b16820a38f 100644
--- a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImagePreview.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImagePreview.tsx
@@ -19,6 +19,7 @@ import { FaImage } from 'react-icons/fa';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import ImageMetadataViewer from '../ImageMetadataViewer/ImageMetadataViewer';
import NextPrevImageButtons from '../NextPrevImageButtons';
+import { useTranslation } from 'react-i18next';
export const imagesSelector = createSelector(
[stateSelector, selectLastSelectedImage],
@@ -117,6 +118,8 @@ const CurrentImagePreview = () => {
const timeoutId = useRef(0);
+ const { t } = useTranslation();
+
const handleMouseOver = useCallback(() => {
setShouldShowNextPrevButtons(true);
window.clearTimeout(timeoutId.current);
@@ -164,7 +167,7 @@ const CurrentImagePreview = () => {
isUploadDisabled={true}
fitContainer
useThumbailFallback
- dropLabel="Set as Current Image"
+ dropLabel={t('gallery.setCurrentImage')}
noContentFallback={
}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
index 35d6cc3361..fe9d891b23 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
@@ -18,6 +18,7 @@ import {
useUnstarImagesMutation,
} from 'services/api/endpoints/images';
import IAIDndImageIcon from '../../../../common/components/IAIDndImageIcon';
+import { useTranslation } from 'react-i18next';
interface HoverableImageProps {
imageName: string;
@@ -28,6 +29,7 @@ const GalleryImage = (props: HoverableImageProps) => {
const { imageName } = props;
const { currentData: imageDTO } = useGetImageDTOQuery(imageName);
const shift = useAppSelector((state) => state.hotkeys.shift);
+ const { t } = useTranslation();
const { handleClick, isSelected, selection, selectionCount } =
useMultiselect(imageDTO);
@@ -136,7 +138,7 @@ const GalleryImage = (props: HoverableImageProps) => {
}
- tooltip="Delete"
+ tooltip={t('gallery.deleteImage')}
styleOverrides={{
bottom: 2,
top: 'auto',
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
index bacd5c38ad..dc41a2ef2a 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImageGrid.tsx
@@ -95,7 +95,7 @@ const GalleryImageGrid = () => {
justifyContent: 'center',
}}
>
-
+
);
}
@@ -140,7 +140,7 @@ const GalleryImageGrid = () => {
onClick={handleLoadMoreImages}
isDisabled={!areMoreAvailable}
isLoading={isFetching}
- loadingText="Loading"
+ loadingText={t('gallery.loading')}
flexShrink={0}
>
{`Load More (${currentData.ids.length} of ${currentViewTotal})`}
@@ -153,7 +153,7 @@ const GalleryImageGrid = () => {
return (
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
index 2267bf15c2..ed7df88e3a 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
@@ -3,6 +3,7 @@ import { isString } from 'lodash-es';
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
import { memo, useCallback, useMemo } from 'react';
import { FaCopy, FaDownload } from 'react-icons/fa';
+import { useTranslation } from 'react-i18next';
type Props = {
label: string;
@@ -33,6 +34,8 @@ const DataViewer = (props: Props) => {
a.remove();
}, [dataString, label, fileName]);
+ const { t } = useTranslation();
+
return (
{
{withDownload && (
-
+
}
variant="ghost"
opacity={0.7}
@@ -84,9 +87,9 @@ const DataViewer = (props: Props) => {
)}
{withCopy && (
-
+
}
variant="ghost"
opacity={0.7}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
index e82b03360e..c1124477e2 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
@@ -2,6 +2,7 @@ import { CoreMetadata } from 'features/nodes/types/types';
import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters';
import { memo, useCallback } from 'react';
import ImageMetadataItem from './ImageMetadataItem';
+import { useTranslation } from 'react-i18next';
type Props = {
metadata?: CoreMetadata;
@@ -10,6 +11,8 @@ type Props = {
const ImageMetadataActions = (props: Props) => {
const { metadata } = props;
+ const { t } = useTranslation();
+
const {
recallPositivePrompt,
recallNegativePrompt,
@@ -70,17 +73,20 @@ const ImageMetadataActions = (props: Props) => {
return (
<>
{metadata.created_by && (
-
+
)}
{metadata.generation_mode && (
)}
{metadata.positive_prompt && (
{
)}
{metadata.negative_prompt && (
{
)}
{metadata.seed !== undefined && metadata.seed !== null && (
@@ -105,63 +111,63 @@ const ImageMetadataActions = (props: Props) => {
metadata.model !== null &&
metadata.model.model_name && (
)}
{metadata.width && (
)}
{metadata.height && (
)}
{/* {metadata.threshold !== undefined && (
dispatch(setThreshold(Number(metadata.threshold)))}
/>
)}
{metadata.perlin !== undefined && (
dispatch(setPerlin(Number(metadata.perlin)))}
/>
)} */}
{metadata.scheduler && (
)}
{metadata.steps && (
)}
{metadata.cfg_scale !== undefined && metadata.cfg_scale !== null && (
)}
{/* {metadata.variations && metadata.variations.length > 0 && (
dispatch(setSeamless(metadata.seamless))}
/>
)}
{metadata.hires_fix && (
dispatch(setHiresFix(metadata.hires_fix))}
/>
@@ -187,7 +193,7 @@ const ImageMetadataActions = (props: Props) => {
{/* {init_image_path && (
dispatch(setInitialImage(init_image_path))}
@@ -195,14 +201,14 @@ const ImageMetadataActions = (props: Props) => {
)} */}
{metadata.strength && (
)}
{/* {metadata.fit && (
dispatch(setShouldFitToWidthHeight(metadata.fit))}
/>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx
index bec5125657..5be0b08700 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataViewer.tsx
@@ -17,6 +17,7 @@ import DataViewer from './DataViewer';
import ImageMetadataActions from './ImageMetadataActions';
import { useAppSelector } from '../../../../app/store/storeHooks';
import { configSelector } from '../../../system/store/configSelectors';
+import { useTranslation } from 'react-i18next';
type ImageMetadataViewerProps = {
image: ImageDTO;
@@ -28,6 +29,7 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
// useHotkeys('esc', () => {
// dispatch(setShouldShowImageDetails(false));
// });
+ const { t } = useTranslation();
const { shouldFetchMetadataFromApi } = useAppSelector(configSelector);
@@ -70,31 +72,31 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
sx={{ display: 'flex', flexDir: 'column', w: 'full', h: 'full' }}
>
- Metadata
- Image Details
- Workflow
+ {t('metadata.metadata')}
+ {t('metadata.imageDetails')}
+ {t('metadata.workflow')}
{metadata ? (
-
+
) : (
-
+
)}
{image ? (
-
+
) : (
-
+
)}
{workflow ? (
-
+
) : (
-
+
)}
diff --git a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
index 4cefdbb20b..3675dd9af9 100644
--- a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
@@ -12,9 +12,11 @@ import TopCenterPanel from './flow/panels/TopCenterPanel/TopCenterPanel';
import TopRightPanel from './flow/panels/TopRightPanel/TopRightPanel';
import BottomLeftPanel from './flow/panels/BottomLeftPanel/BottomLeftPanel';
import MinimapPanel from './flow/panels/MinimapPanel/MinimapPanel';
+import { useTranslation } from 'react-i18next';
const NodeEditor = () => {
const isReady = useAppSelector((state) => state.nodes.isReady);
+ const { t } = useTranslation();
return (
{
}}
>
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodePopover/AddNodePopover.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodePopover/AddNodePopover.tsx
index 83f7482177..4433adf4ab 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/AddNodePopover/AddNodePopover.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/AddNodePopover/AddNodePopover.tsx
@@ -24,6 +24,7 @@ import { HotkeyCallback } from 'react-hotkeys-hook/dist/types';
import 'reactflow/dist/style.css';
import { AnyInvocationType } from 'services/events/types';
import { AddNodePopoverSelectItem } from './AddNodePopoverSelectItem';
+import { useTranslation } from 'react-i18next';
type NodeTemplate = {
label: string;
@@ -48,43 +49,45 @@ const filter = (value: string, item: NodeTemplate) => {
);
};
-const selector = createSelector(
- [stateSelector],
- ({ nodes }) => {
- const data: NodeTemplate[] = map(nodes.nodeTemplates, (template) => {
- return {
- label: template.title,
- value: template.type,
- description: template.description,
- tags: template.tags,
- };
- });
-
- data.push({
- label: 'Progress Image',
- value: 'current_image',
- description: 'Displays the current image in the Node Editor',
- tags: ['progress'],
- });
-
- data.push({
- label: 'Notes',
- value: 'notes',
- description: 'Add notes about your workflow',
- tags: ['notes'],
- });
-
- data.sort((a, b) => a.label.localeCompare(b.label));
-
- return { data };
- },
- defaultSelectorOptions
-);
-
const AddNodePopover = () => {
const dispatch = useAppDispatch();
const buildInvocation = useBuildNodeData();
const toaster = useAppToaster();
+ const { t } = useTranslation();
+
+ const selector = createSelector(
+ [stateSelector],
+ ({ nodes }) => {
+ const data: NodeTemplate[] = map(nodes.nodeTemplates, (template) => {
+ return {
+ label: template.title,
+ value: template.type,
+ description: template.description,
+ tags: template.tags,
+ };
+ });
+
+ data.push({
+ label: t('nodes.currentImage'),
+ value: 'current_image',
+ description: t('nodes.currentImageDescription'),
+ tags: ['progress'],
+ });
+
+ data.push({
+ label: t('nodes.notes'),
+ value: 'notes',
+ description: t('nodes.notesDescription'),
+ tags: ['notes'],
+ });
+
+ data.sort((a, b) => a.label.localeCompare(b.label));
+
+ return { data, t };
+ },
+ defaultSelectorOptions
+ );
+
const { data } = useAppSelector(selector);
const isOpen = useAppSelector((state) => state.nodes.isAddNodePopoverOpen);
const inputRef = useRef(null);
@@ -92,18 +95,20 @@ const AddNodePopover = () => {
const addNode = useCallback(
(nodeType: AnyInvocationType) => {
const invocation = buildInvocation(nodeType);
-
if (!invocation) {
+ const errorMessage = t('nodes.unknownInvocation', {
+ nodeType: nodeType,
+ });
toaster({
status: 'error',
- title: `Unknown Invocation type ${nodeType}`,
+ title: errorMessage,
});
return;
}
dispatch(nodeAdded(invocation));
},
- [dispatch, buildInvocation, toaster]
+ [dispatch, buildInvocation, toaster, t]
);
const handleChange = useCallback(
@@ -179,11 +184,11 @@ const AddNodePopover = () => {
{
const label = useNodeLabel(nodeId);
const title = useNodeTemplateTitle(nodeId);
const doVersionsMatch = useDoNodeVersionsMatch(nodeId);
+ const { t } = useTranslation();
return (
<>
@@ -65,7 +67,7 @@ const InvocationNodeNotes = ({ nodeId }: Props) => {
- {label || title || 'Unknown Node'}
+ {label || title || t('nodes.unknownNode')}
@@ -82,6 +84,7 @@ export default memo(InvocationNodeNotes);
const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
const data = useNodeData(nodeId);
const nodeTemplate = useNodeTemplate(nodeId);
+ const { t } = useTranslation();
const title = useMemo(() => {
if (data?.label && nodeTemplate?.title) {
@@ -96,8 +99,8 @@ const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
return nodeTemplate.title;
}
- return 'Unknown Node';
- }, [data, nodeTemplate]);
+ return t('nodes.unknownNode');
+ }, [data, nodeTemplate, t]);
const versionComponent = useMemo(() => {
if (!isInvocationNodeData(data) || !nodeTemplate) {
@@ -107,7 +110,7 @@ const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
if (!data.version) {
return (
- Version unknown
+ {t('nodes.versionUnknown')}
);
}
@@ -115,7 +118,7 @@ const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
if (!nodeTemplate.version) {
return (
- Version {data.version} (unknown template)
+ {t('nodes.version')} {data.version} ({t('nodes.unknownTemplate')})
);
}
@@ -123,7 +126,7 @@ const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
if (compare(data.version, nodeTemplate.version, '<')) {
return (
- Version {data.version} (update node)
+ {t('nodes.version')} {data.version} ({t('nodes.updateNode')})
);
}
@@ -131,16 +134,20 @@ const TooltipContent = memo(({ nodeId }: { nodeId: string }) => {
if (compare(data.version, nodeTemplate.version, '>')) {
return (
- Version {data.version} (update app)
+ {t('nodes.version')} {data.version} ({t('nodes.updateApp')})
);
}
- return Version {data.version};
- }, [data, nodeTemplate]);
+ return (
+
+ {t('nodes.version')} {data.version}
+
+ );
+ }, [data, nodeTemplate, t]);
if (!isInvocationNodeData(data)) {
- return Unknown Node;
+ return {t('nodes.unknownNode')};
}
return (
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeStatusIndicator.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeStatusIndicator.tsx
index 6e1da90ad8..28f6e08a4e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeStatusIndicator.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeStatusIndicator.tsx
@@ -14,6 +14,7 @@ import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
import { NodeExecutionState, NodeStatus } from 'features/nodes/types/types';
import { memo, useMemo } from 'react';
import { FaCheck, FaEllipsisH, FaExclamation } from 'react-icons/fa';
+import { useTranslation } from 'react-i18next';
type Props = {
nodeId: string;
@@ -72,10 +73,10 @@ type TooltipLabelProps = {
const TooltipLabel = memo(({ nodeExecutionState }: TooltipLabelProps) => {
const { status, progress, progressImage } = nodeExecutionState;
+ const { t } = useTranslation();
if (status === NodeStatus.PENDING) {
return Pending;
}
-
if (status === NodeStatus.IN_PROGRESS) {
if (progressImage) {
return (
@@ -97,18 +98,22 @@ const TooltipLabel = memo(({ nodeExecutionState }: TooltipLabelProps) => {
}
if (progress !== null) {
- return In Progress ({Math.round(progress * 100)}%);
+ return (
+
+ {t('nodes.executionStateInProgress')} ({Math.round(progress * 100)}%)
+
+ );
}
- return In Progress;
+ return {t('nodes.executionStateInProgress')};
}
if (status === NodeStatus.COMPLETED) {
- return Completed;
+ return {t('nodes.executionStateCompleted')};
}
if (status === NodeStatus.FAILED) {
- return nodeExecutionState.error;
+ return {t('nodes.executionStateError')};
}
return null;
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/NotesTextarea.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/NotesTextarea.tsx
index 68967096f9..5e85f5ba3c 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/NotesTextarea.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/NotesTextarea.tsx
@@ -5,10 +5,12 @@ import { useNodeData } from 'features/nodes/hooks/useNodeData';
import { nodeNotesChanged } from 'features/nodes/store/nodesSlice';
import { isInvocationNodeData } from 'features/nodes/types/types';
import { ChangeEvent, memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
const NotesTextarea = ({ nodeId }: { nodeId: string }) => {
const dispatch = useAppDispatch();
const data = useNodeData(nodeId);
+ const { t } = useTranslation();
const handleNotesChanged = useCallback(
(e: ChangeEvent) => {
dispatch(nodeNotesChanged({ nodeId, notes: e.target.value }));
@@ -20,7 +22,7 @@ const NotesTextarea = ({ nodeId }: { nodeId: string }) => {
}
return (
- Notes
+ {t('nodes.notes')}
{
} = props;
const label = useFieldLabel(nodeId, fieldName);
const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, kind);
+ const { t } = useTranslation();
const dispatch = useAppDispatch();
const [localTitle, setLocalTitle] = useState(
- label || fieldTemplateTitle || 'Unknown Field'
+ label || fieldTemplateTitle || t('nodes.unknownFeild')
);
const handleSubmit = useCallback(
@@ -44,10 +46,10 @@ const EditableFieldTitle = forwardRef((props: Props, ref) => {
if (newTitle && (newTitle === label || newTitle === fieldTemplateTitle)) {
return;
}
- setLocalTitle(newTitle || fieldTemplateTitle || 'Unknown Field');
+ setLocalTitle(newTitle || fieldTemplateTitle || t('nodes.unknownField'));
dispatch(fieldLabelChanged({ nodeId, fieldName, label: newTitle }));
},
- [label, fieldTemplateTitle, dispatch, nodeId, fieldName]
+ [label, fieldTemplateTitle, dispatch, nodeId, fieldName, t]
);
const handleChange = useCallback((newTitle: string) => {
@@ -56,8 +58,8 @@ const EditableFieldTitle = forwardRef((props: Props, ref) => {
useEffect(() => {
// Another component may change the title; sync local title with global state
- setLocalTitle(label || fieldTemplateTitle || 'Unknown Field');
- }, [label, fieldTemplateTitle]);
+ setLocalTitle(label || fieldTemplateTitle || t('nodes.unknownField'));
+ }, [label, fieldTemplateTitle, t]);
return (
{
const label = useFieldLabel(nodeId, fieldName);
const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, kind);
const input = useFieldInputKind(nodeId, fieldName);
+ const { t } = useTranslation();
const skipEvent = useCallback((e: MouseEvent) => {
e.preventDefault();
@@ -119,7 +121,9 @@ const FieldContextMenu = ({ nodeId, fieldName, kind, children }: Props) => {
motionProps={menuListMotionProps}
onContextMenu={skipEvent}
>
-
+
{menuItems}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/FieldTooltipContent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/FieldTooltipContent.tsx
index 1a47e81aa3..be66214a59 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/FieldTooltipContent.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/FieldTooltipContent.tsx
@@ -8,6 +8,7 @@ import {
} from 'features/nodes/types/types';
import { startCase } from 'lodash-es';
import { memo, useMemo } from 'react';
+import { useTranslation } from 'react-i18next';
interface Props {
nodeId: string;
@@ -19,6 +20,7 @@ const FieldTooltipContent = ({ nodeId, fieldName, kind }: Props) => {
const field = useFieldData(nodeId, fieldName);
const fieldTemplate = useFieldTemplate(nodeId, fieldName, kind);
const isInputTemplate = isInputFieldTemplate(fieldTemplate);
+ const { t } = useTranslation();
const fieldTitle = useMemo(() => {
if (isInputFieldValue(field)) {
if (field.label && fieldTemplate?.title) {
@@ -33,11 +35,11 @@ const FieldTooltipContent = ({ nodeId, fieldName, kind }: Props) => {
return fieldTemplate.title;
}
- return 'Unknown Field';
+ return t('nodes.unknownField');
} else {
- return fieldTemplate?.title || 'Unknown Field';
+ return fieldTemplate?.title || t('nodes.unknownField');
}
- }, [field, fieldTemplate]);
+ }, [field, fieldTemplate, t]);
return (
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/LinearViewField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/LinearViewField.tsx
index e3983560a8..a9416380d4 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/LinearViewField.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/LinearViewField.tsx
@@ -17,6 +17,7 @@ import { FaInfoCircle, FaTrash } from 'react-icons/fa';
import EditableFieldTitle from './EditableFieldTitle';
import FieldTooltipContent from './FieldTooltipContent';
import InputFieldRenderer from './InputFieldRenderer';
+import { useTranslation } from 'react-i18next';
type Props = {
nodeId: string;
@@ -27,7 +28,7 @@ const LinearViewField = ({ nodeId, fieldName }: Props) => {
const dispatch = useAppDispatch();
const { isMouseOverNode, handleMouseOut, handleMouseOver } =
useMouseOverNode(nodeId);
-
+ const { t } = useTranslation();
const handleRemoveField = useCallback(() => {
dispatch(workflowExposedFieldRemoved({ nodeId, fieldName }));
}, [dispatch, fieldName, nodeId]);
@@ -75,8 +76,8 @@ const LinearViewField = ({ nodeId, fieldName }: Props) => {
{fieldTemplate.options.map((option) => (
-
+
))}
);
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/LoRAModelInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/LoRAModelInputField.tsx
index 7f8f179add..3ca87b3e85 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/LoRAModelInputField.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/LoRAModelInputField.tsx
@@ -14,6 +14,7 @@ import { modelIdToLoRAModelParam } from 'features/parameters/util/modelIdToLoRAM
import { forEach } from 'lodash-es';
import { memo, useCallback, useMemo } from 'react';
import { useGetLoRAModelsQuery } from 'services/api/endpoints/models';
+import { useTranslation } from 'react-i18next';
const LoRAModelInputFieldComponent = (
props: FieldComponentProps<
@@ -25,6 +26,7 @@ const LoRAModelInputFieldComponent = (
const lora = field.value;
const dispatch = useAppDispatch();
const { data: loraModels } = useGetLoRAModelsQuery();
+ const { t } = useTranslation();
const data = useMemo(() => {
if (!loraModels) {
@@ -92,9 +94,11 @@ const LoRAModelInputFieldComponent = (
0 ? 'Select a LoRA' : 'No LoRAs available'}
+ placeholder={
+ data.length > 0 ? t('models.selectLoRA') : t('models.noLoRAsAvailable')
+ }
data={data}
- nothingFound="No matching LoRAs"
+ nothingFound={t('models.noMatchingLoRAs')}
itemComponent={IAIMantineSelectItemWithTooltip}
disabled={data.length === 0}
filter={(value, item: SelectItem) =>
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/MainModelInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/MainModelInputField.tsx
index f89177576c..08483986e3 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/MainModelInputField.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/MainModelInputField.tsx
@@ -19,6 +19,7 @@ import {
useGetMainModelsQuery,
useGetOnnxModelsQuery,
} from 'services/api/endpoints/models';
+import { useTranslation } from 'react-i18next';
const MainModelInputFieldComponent = (
props: FieldComponentProps<
@@ -29,7 +30,7 @@ const MainModelInputFieldComponent = (
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const isSyncModelEnabled = useFeatureStatus('syncModels').isFeatureEnabled;
-
+ const { t } = useTranslation();
const { data: onnxModels, isLoading: isLoadingOnnxModels } =
useGetOnnxModelsQuery(NON_SDXL_MAIN_MODELS);
const { data: mainModels, isLoading: isLoadingMainModels } =
@@ -127,7 +128,9 @@ const MainModelInputFieldComponent = (
tooltip={selectedModel?.description}
value={selectedModel?.id}
placeholder={
- data.length > 0 ? 'Select a model' : 'No models available'
+ data.length > 0
+ ? t('models.selectModel')
+ : t('models.noModelsAvailable')
}
data={data}
error={!selectedModel}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/RefinerModelInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/RefinerModelInputField.tsx
index edad33d342..19f2c5ac8e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/RefinerModelInputField.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/RefinerModelInputField.tsx
@@ -89,7 +89,7 @@ const RefinerModelInputFieldComponent = (
return isLoading ? (
@@ -99,7 +99,11 @@ const RefinerModelInputFieldComponent = (
className="nowheel nodrag"
tooltip={selectedModel?.description}
value={selectedModel?.id}
- placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
+ placeholder={
+ data.length > 0
+ ? t('models.selectModel')
+ : t('models.noModelsAvailable')
+ }
data={data}
error={!selectedModel}
disabled={data.length === 0}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SDXLMainModelInputField.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SDXLMainModelInputField.tsx
index ffb4d8d412..89cb8d5150 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SDXLMainModelInputField.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/SDXLMainModelInputField.tsx
@@ -116,7 +116,7 @@ const ModelInputFieldComponent = (
return isLoading ? (
@@ -126,7 +126,11 @@ const ModelInputFieldComponent = (
className="nowheel nodrag"
tooltip={selectedModel?.description}
value={selectedModel?.id}
- placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
+ placeholder={
+ data.length > 0
+ ? t('models.selectModel')
+ : t('models.noModelsAvailable')
+ }
data={data}
error={!selectedModel}
disabled={data.length === 0}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NodeTitle.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NodeTitle.tsx
index 283e5d115d..e31ac19be0 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NodeTitle.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/common/NodeTitle.tsx
@@ -12,6 +12,7 @@ import { useNodeTemplateTitle } from 'features/nodes/hooks/useNodeTemplateTitle'
import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
import { MouseEvent, memo, useCallback, useEffect, useState } from 'react';
+import { useTranslation } from 'react-i18next';
type Props = {
nodeId: string;
@@ -22,16 +23,17 @@ const NodeTitle = ({ nodeId, title }: Props) => {
const dispatch = useAppDispatch();
const label = useNodeLabel(nodeId);
const templateTitle = useNodeTemplateTitle(nodeId);
+ const { t } = useTranslation();
const [localTitle, setLocalTitle] = useState('');
const handleSubmit = useCallback(
async (newTitle: string) => {
dispatch(nodeLabelChanged({ nodeId, label: newTitle }));
setLocalTitle(
- newTitle || title || templateTitle || 'Problem Setting Title'
+ label || title || templateTitle || t('nodes.problemSettingTitle')
);
},
- [dispatch, nodeId, title, templateTitle]
+ [dispatch, nodeId, title, templateTitle, label, t]
);
const handleChange = useCallback((newTitle: string) => {
@@ -40,8 +42,10 @@ const NodeTitle = ({ nodeId, title }: Props) => {
useEffect(() => {
// Another component may change the title; sync local title with global state
- setLocalTitle(label || title || templateTitle || 'Problem Setting Title');
- }, [label, templateTitle, title]);
+ setLocalTitle(
+ label || title || templateTitle || t('nodes.problemSettingTitle')
+ );
+ }, [label, templateTitle, title, t]);
return (
state.nodes.nodeOpacity);
+ const { t } = useTranslation();
const handleChange = useCallback(
(v: number) => {
@@ -23,7 +25,7 @@ export default function NodeOpacitySlider() {
return (
{
const dispatch = useAppDispatch();
-
+ const { t } = useTranslation();
const handleOpenAddNodePopover = useCallback(() => {
dispatch(addNodePopoverOpened());
}, [dispatch]);
@@ -15,8 +16,8 @@ const TopLeftPanel = () => {
return (
}
onClick={handleOpenAddNodePopover}
/>
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings.tsx
index c423750cd8..b822b2abb9 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings.tsx
@@ -29,6 +29,7 @@ import { ChangeEvent, memo, useCallback } from 'react';
import { FaCog } from 'react-icons/fa';
import { SelectionMode } from 'reactflow';
import ReloadNodeTemplatesButton from '../TopCenterPanel/ReloadSchemaButton';
+import { useTranslation } from 'react-i18next';
const formLabelProps: FormLabelProps = {
fontWeight: 600,
@@ -101,12 +102,14 @@ const WorkflowEditorSettings = forwardRef((_, ref) => {
[dispatch]
);
+ const { t } = useTranslation();
+
return (
<>
}
onClick={onOpen}
/>
@@ -114,7 +117,7 @@ const WorkflowEditorSettings = forwardRef((_, ref) => {
- Workflow Editor Settings
+ {t('nodes.workflowSettings')}
{
formLabelProps={formLabelProps}
onChange={handleChangeShouldAnimate}
isChecked={shouldAnimateEdges}
- label="Animated Edges"
- helperText="Animate selected edges and edges connected to selected nodes"
+ label={t('nodes.animatedEdges')}
+ helperText={t('nodes.animatedEdgesHelp')}
/>
Advanced
@@ -162,8 +165,8 @@ const WorkflowEditorSettings = forwardRef((_, ref) => {
formLabelProps={formLabelProps}
isChecked={shouldValidateGraph}
onChange={handleChangeShouldValidate}
- label="Validate Connections and Graph"
- helperText="Prevent invalid connections from being made, and invalid graphs from being invoked"
+ label={t('nodes.validateConnections')}
+ helperText={t('nodes.validateConnectionsHelp')}
/>
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx
index d626a92021..ffc260b95a 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorDetailsTab.tsx
@@ -9,6 +9,7 @@ import { memo } from 'react';
import NotesTextarea from '../../flow/nodes/Invocation/NotesTextarea';
import NodeTitle from '../../flow/nodes/common/NodeTitle';
import ScrollableContent from '../ScrollableContent';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -34,9 +35,12 @@ const selector = createSelector(
const InspectorDetailsTab = () => {
const { data, template } = useAppSelector(selector);
+ const { t } = useTranslation();
if (!template || !data) {
- return ;
+ return (
+
+ );
}
return ;
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx
index f6b229f997..b6a194051e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorOutputsTab.tsx
@@ -11,6 +11,7 @@ import { ImageOutput } from 'services/api/types';
import { AnyResult } from 'services/events/types';
import ScrollableContent from '../ScrollableContent';
import ImageOutputPreview from './outputs/ImageOutputPreview';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -40,13 +41,18 @@ const selector = createSelector(
const InspectorOutputsTab = () => {
const { node, template, nes } = useAppSelector(selector);
+ const { t } = useTranslation();
if (!node || !nes || !isInvocationNode(node)) {
- return ;
+ return (
+
+ );
}
if (nes.outputs.length === 0) {
- return ;
+ return (
+
+ );
}
return (
@@ -77,7 +83,7 @@ const InspectorOutputsTab = () => {
/>
))
) : (
-
+
)}
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorTemplateTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorTemplateTab.tsx
index 525b58b1cb..b5e358239a 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorTemplateTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/inspector/InspectorTemplateTab.tsx
@@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import DataViewer from 'features/gallery/components/ImageMetadataViewer/DataViewer';
import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -29,12 +30,15 @@ const selector = createSelector(
const NodeTemplateInspector = () => {
const { template } = useAppSelector(selector);
+ const { t } = useTranslation();
if (!template) {
- return ;
+ return (
+
+ );
}
- return ;
+ return ;
};
export default memo(NodeTemplateInspector);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
index e36675b71f..ad1070096e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
@@ -16,6 +16,7 @@ import {
} from 'features/nodes/store/nodesSlice';
import { ChangeEvent, memo, useCallback } from 'react';
import ScrollableContent from '../ScrollableContent';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -85,6 +86,8 @@ const WorkflowGeneralTab = () => {
[dispatch]
);
+ const { t } = useTranslation();
+
return (
{
}}
>
-
+
-
+
- Short Description
+ {t('nodes.workflowDescription')}
{
/>
- Notes
+ {t('nodes.workflowNotes')}
{
const workflow = useWorkflow();
+ const { t } = useTranslation();
return (
{
h: 'full',
}}
>
-
+
);
};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLinearTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLinearTab.tsx
index d1cecefbff..cf9e11d9d6 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLinearTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLinearTab.tsx
@@ -7,6 +7,7 @@ import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { memo } from 'react';
import LinearViewField from '../../flow/nodes/Invocation/fields/LinearViewField';
import ScrollableContent from '../ScrollableContent';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -20,6 +21,7 @@ const selector = createSelector(
const WorkflowLinearTab = () => {
const { fields } = useAppSelector(selector);
+ const { t } = useTranslation();
return (
{
))
) : (
)}
diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useLoadWorkflowFromFile.tsx b/invokeai/frontend/web/src/features/nodes/hooks/useLoadWorkflowFromFile.tsx
index 7f015ac5eb..890fa7a72d 100644
--- a/invokeai/frontend/web/src/features/nodes/hooks/useLoadWorkflowFromFile.tsx
+++ b/invokeai/frontend/web/src/features/nodes/hooks/useLoadWorkflowFromFile.tsx
@@ -9,10 +9,12 @@ import { memo, useCallback } from 'react';
import { ZodError } from 'zod';
import { fromZodError, fromZodIssue } from 'zod-validation-error';
import { workflowLoadRequested } from '../store/actions';
+import { useTranslation } from 'react-i18next';
export const useLoadWorkflowFromFile = () => {
const dispatch = useAppDispatch();
const logger = useLogger('nodes');
+ const { t } = useTranslation();
const loadWorkflowFromFile = useCallback(
(file: File | null) => {
if (!file) {
@@ -28,7 +30,7 @@ export const useLoadWorkflowFromFile = () => {
if (!result.success) {
const { message } = fromZodError(result.error, {
- prefix: 'Workflow Validation Error',
+ prefix: t('nodes.workflowValidation'),
});
logger.error({ error: parseify(result.error) }, message);
@@ -36,7 +38,7 @@ export const useLoadWorkflowFromFile = () => {
dispatch(
addToast(
makeToast({
- title: 'Unable to Validate Workflow',
+ title: t('nodes.unableToValidateWorkflow'),
status: 'error',
duration: 5000,
})
@@ -54,7 +56,7 @@ export const useLoadWorkflowFromFile = () => {
dispatch(
addToast(
makeToast({
- title: 'Unable to Load Workflow',
+ title: t('nodes.unableToLoadWorkflow'),
status: 'error',
})
)
@@ -64,7 +66,7 @@ export const useLoadWorkflowFromFile = () => {
reader.readAsText(file);
},
- [dispatch, logger]
+ [dispatch, logger, t]
);
return loadWorkflowFromFile;
diff --git a/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts b/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts
index 5cb6d557e8..ac157bb476 100644
--- a/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts
+++ b/invokeai/frontend/web/src/features/nodes/store/util/makeIsConnectionValidSelector.ts
@@ -9,6 +9,7 @@ import {
} from 'features/nodes/types/constants';
import { FieldType } from 'features/nodes/types/types';
import { HandleType } from 'reactflow';
+import i18n from 'i18next';
/**
* NOTE: The logic here must be duplicated in `invokeai/frontend/web/src/features/nodes/hooks/useIsValidConnection.ts`
@@ -20,17 +21,17 @@ export const makeConnectionErrorSelector = (
fieldName: string,
handleType: HandleType,
fieldType?: FieldType
-) =>
- createSelector(stateSelector, (state) => {
+) => {
+ return createSelector(stateSelector, (state) => {
if (!fieldType) {
- return 'No field type';
+ return i18n.t('nodes.noFieldType');
}
const { currentConnectionFieldType, connectionStartParams, nodes, edges } =
state.nodes;
if (!connectionStartParams || !currentConnectionFieldType) {
- return 'No connection in progress';
+ return i18n.t('nodes.noConnectionInProgress');
}
const {
@@ -40,7 +41,7 @@ export const makeConnectionErrorSelector = (
} = connectionStartParams;
if (!connectionHandleType || !connectionNodeId || !connectionFieldName) {
- return 'No connection data';
+ return i18n.t('nodes.noConnectionData');
}
const targetType =
@@ -49,14 +50,14 @@ export const makeConnectionErrorSelector = (
handleType === 'source' ? fieldType : currentConnectionFieldType;
if (nodeId === connectionNodeId) {
- return 'Cannot connect to self';
+ return i18n.t('nodes.cannotConnectToSelf');
}
if (handleType === connectionHandleType) {
if (handleType === 'source') {
- return 'Cannot connect output to output';
+ return i18n.t('nodes.cannotConnectOutputToOutput');
}
- return 'Cannot connect input to input';
+ return i18n.t('nodes.cannotConnectInputToInput');
}
if (
@@ -66,7 +67,7 @@ export const makeConnectionErrorSelector = (
// except CollectionItem inputs can have multiples
targetType !== 'CollectionItem'
) {
- return 'Input may only have one connection';
+ return i18n.t('nodes.inputMayOnlyHaveOneConnection');
}
/**
@@ -125,7 +126,7 @@ export const makeConnectionErrorSelector = (
isIntToFloat
)
) {
- return 'Field types must match';
+ return i18n.t('nodes.fieldTypesMustMatch');
}
}
@@ -137,8 +138,9 @@ export const makeConnectionErrorSelector = (
);
if (!isGraphAcyclic) {
- return 'Connection would create a cycle';
+ return i18n.t('nodes.connectionWouldCreateCycle');
}
return null;
});
+};
diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts
index 402ef4ac7a..b1ad6a7b96 100644
--- a/invokeai/frontend/web/src/features/nodes/types/types.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/types.ts
@@ -286,7 +286,7 @@ export type BooleanPolymorphicInputFieldValue = z.infer<
export const zEnumInputFieldValue = zInputFieldValueBase.extend({
type: z.literal('enum'),
- value: z.union([z.string(), z.number()]).optional(),
+ value: z.string().optional(),
});
export type EnumInputFieldValue = z.infer;
@@ -822,10 +822,10 @@ export type ControlPolymorphicInputFieldTemplate = Omit<
};
export type EnumInputFieldTemplate = InputFieldTemplateBase & {
- default: string | number;
+ default: string;
type: 'enum';
- enumType: 'string' | 'number';
- options: Array;
+ options: string[];
+ labels?: { [key: string]: string };
};
export type MainModelInputFieldTemplate = InputFieldTemplateBase & {
diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts
index 20463f37f6..c5e6be75c0 100644
--- a/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/fieldTemplateBuilders.ts
@@ -656,8 +656,8 @@ const buildEnumInputFieldTemplate = ({
const template: EnumInputFieldTemplate = {
...baseField,
type: 'enum',
- enumType: (schemaObject.type as 'string' | 'number') ?? 'string', // TODO: dangerous?
- options: options,
+ options,
+ ui_choice_labels: schemaObject.ui_choice_labels,
default: schemaObject.default ?? options[0],
};
diff --git a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts
index a3046feee7..3052d4e230 100644
--- a/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/fieldValueBuilders.ts
@@ -1,8 +1,7 @@
import { InputFieldTemplate, InputFieldValue } from '../types/types';
const FIELD_VALUE_FALLBACK_MAP = {
- 'enum.number': 0,
- 'enum.string': '',
+ enum: '',
boolean: false,
BooleanCollection: [],
BooleanPolymorphic: false,
@@ -62,19 +61,8 @@ export const buildInputFieldValue = (
fieldKind: 'input',
} as InputFieldValue;
- if (template.type === 'enum') {
- if (template.enumType === 'number') {
- fieldValue.value =
- template.default ?? FIELD_VALUE_FALLBACK_MAP['enum.number'];
- }
- if (template.enumType === 'string') {
- fieldValue.value =
- template.default ?? FIELD_VALUE_FALLBACK_MAP['enum.string'];
- }
- } else {
- fieldValue.value =
- template.default ?? FIELD_VALUE_FALLBACK_MAP[template.type];
- }
+ fieldValue.value =
+ template.default ?? FIELD_VALUE_FALLBACK_MAP[template.type];
return fieldValue;
};
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx
index bca1402571..2d461f7bb4 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse.tsx
@@ -5,6 +5,7 @@ import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAICollapse from 'common/components/IAICollapse';
import ParamClipSkip from './ParamClipSkip';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -22,13 +23,13 @@ export default function ParamAdvancedCollapse() {
const shouldShowAdvancedOptions = useAppSelector(
(state: RootState) => state.generation.shouldShowAdvancedOptions
);
-
+ const { t } = useTranslation();
if (!shouldShowAdvancedOptions) {
return null;
}
return (
-
+
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamCpuNoise.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamCpuNoise.tsx
index 45fd7fcf57..f10c3dd1a5 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamCpuNoise.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamCpuNoise.tsx
@@ -5,6 +5,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAISwitch from 'common/components/IAISwitch';
import { shouldUseCpuNoiseChanged } from 'features/parameters/store/generationSlice';
import { ChangeEvent } from 'react';
+import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
@@ -21,6 +22,7 @@ const selector = createSelector(
export const ParamCpuNoiseToggle = () => {
const dispatch = useAppDispatch();
const { isDisabled, shouldUseCpuNoise } = useAppSelector(selector);
+ const { t } = useTranslation();
const handleChange = (e: ChangeEvent) =>
dispatch(shouldUseCpuNoiseChanged(e.target.checked));
@@ -28,7 +30,7 @@ export const ParamCpuNoiseToggle = () => {
return (
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseToggle.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseToggle.tsx
index c1c2fb5119..4ec6b0163b 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseToggle.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseToggle.tsx
@@ -3,9 +3,11 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setShouldUseNoiseSettings } from 'features/parameters/store/generationSlice';
import { ChangeEvent } from 'react';
+import { useTranslation } from 'react-i18next';
export const ParamNoiseToggle = () => {
const dispatch = useAppDispatch();
+ const { t } = useTranslation();
const shouldUseNoiseSettings = useAppSelector(
(state: RootState) => state.generation.shouldUseNoiseSettings
@@ -16,7 +18,7 @@ export const ParamNoiseToggle = () => {
return (
diff --git a/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx
index e7bd36b931..6a80ccb52f 100644
--- a/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx
@@ -146,7 +146,7 @@ const CancelButton = (props: Props) => {
id="cancel-button"
{...rest}
>
- Cancel
+ {t('parameters.cancel.cancel')}
)}