mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
merge with main
This commit is contained in:
commit
4c5aedbcba
98
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
98
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@ -6,10 +6,6 @@ title: '[bug]: '
|
|||||||
|
|
||||||
labels: ['bug']
|
labels: ['bug']
|
||||||
|
|
||||||
# assignees:
|
|
||||||
# - moderator_bot
|
|
||||||
# - lstein
|
|
||||||
|
|
||||||
body:
|
body:
|
||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
@ -18,10 +14,9 @@ body:
|
|||||||
|
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this problem?
|
||||||
description: |
|
description: |
|
||||||
Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
|
Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
|
||||||
irst to see if an issue already exists for the bug you encountered.
|
|
||||||
options:
|
options:
|
||||||
- label: I have searched the existing issues
|
- label: I have searched the existing issues
|
||||||
required: true
|
required: true
|
||||||
@ -33,80 +28,119 @@ body:
|
|||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: os_dropdown
|
id: os_dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: OS
|
label: Operating system
|
||||||
description: Which operating System did you use when the bug occured
|
description: Your computer's operating system.
|
||||||
multiple: false
|
multiple: false
|
||||||
options:
|
options:
|
||||||
- 'Linux'
|
- 'Linux'
|
||||||
- 'Windows'
|
- 'Windows'
|
||||||
- 'macOS'
|
- 'macOS'
|
||||||
|
- 'other'
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: gpu_dropdown
|
id: gpu_dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: GPU
|
label: GPU vendor
|
||||||
description: Which kind of Graphic-Adapter is your System using
|
description: Your GPU's vendor.
|
||||||
multiple: false
|
multiple: false
|
||||||
options:
|
options:
|
||||||
- 'cuda'
|
- 'Nvidia (CUDA)'
|
||||||
- 'amd'
|
- 'AMD (ROCm)'
|
||||||
- 'mps'
|
- 'Apple Silicon (MPS)'
|
||||||
- 'cpu'
|
- 'None (CPU)'
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: gpu_model
|
||||||
|
attributes:
|
||||||
|
label: GPU model
|
||||||
|
description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
|
||||||
|
placeholder: ex. RTX 2080 Ti, Mac M1 Pro
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: vram
|
id: vram
|
||||||
attributes:
|
attributes:
|
||||||
label: VRAM
|
label: GPU VRAM
|
||||||
description: Size of the VRAM if known
|
description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
|
||||||
placeholder: 8GB
|
placeholder: 8GB
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: version-number
|
id: version-number
|
||||||
attributes:
|
attributes:
|
||||||
label: What version did you experience this issue on?
|
label: Version number
|
||||||
description: |
|
description: |
|
||||||
Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||||
placeholder: X.X.X
|
placeholder: ex. 3.6.1
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: browser-version
|
||||||
|
attributes:
|
||||||
|
label: Browser
|
||||||
|
description: Your web browser and version.
|
||||||
|
placeholder: ex. Firefox 123.0b3
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: python-deps
|
||||||
|
attributes:
|
||||||
|
label: Python dependencies
|
||||||
|
description: |
|
||||||
|
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: what-happened
|
id: what-happened
|
||||||
attributes:
|
attributes:
|
||||||
label: What happened?
|
label: What happened
|
||||||
description: |
|
description: |
|
||||||
Briefly describe what happened, what you expected to happen and how to reproduce this bug.
|
Describe what happened. Include any relevant error messages, stack traces and screenshots here.
|
||||||
placeholder: When using the webinterface and right-clicking on button X instead of the popup-menu there error Y appears
|
placeholder: I clicked button X and then Y happened.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
|
id: what-you-expected
|
||||||
attributes:
|
attributes:
|
||||||
label: Screenshots
|
label: What you expected to happen
|
||||||
description: If applicable, add screenshots to help explain your problem
|
description: Describe what you expected to happen.
|
||||||
placeholder: this is what the result looked like <screenshot>
|
placeholder: I expected Z to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: how-to-repro
|
||||||
|
attributes:
|
||||||
|
label: How to reproduce the problem
|
||||||
|
description: List steps to reproduce the problem.
|
||||||
|
placeholder: Start the app, generate an image with these settings, then click button X.
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
|
id: additional-context
|
||||||
attributes:
|
attributes:
|
||||||
label: Additional context
|
label: Additional context
|
||||||
description: Add any other context about the problem here
|
description: Any other context that might help us to understand the problem.
|
||||||
placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
|
placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: contact
|
id: discord-username
|
||||||
attributes:
|
attributes:
|
||||||
label: Contact Details
|
label: Discord username
|
||||||
description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
|
description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
|
||||||
placeholder: ex. email@example.com, discordname, twitter, ...
|
placeholder: supercoolusername123
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
59
.github/pr_labels.yml
vendored
Normal file
59
.github/pr_labels.yml
vendored
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
Root:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: '*'
|
||||||
|
|
||||||
|
PythonDeps:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'pyproject.toml'
|
||||||
|
|
||||||
|
Python:
|
||||||
|
- changed-files:
|
||||||
|
- all-globs-to-any-file:
|
||||||
|
- 'invokeai/**'
|
||||||
|
- '!invokeai/frontend/web/**'
|
||||||
|
|
||||||
|
PythonTests:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'tests/**'
|
||||||
|
|
||||||
|
CICD:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: .github/**
|
||||||
|
|
||||||
|
Docker:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: docker/**
|
||||||
|
|
||||||
|
Installer:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: installer/**
|
||||||
|
|
||||||
|
Documentation:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: docs/**
|
||||||
|
|
||||||
|
Invocations:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'invokeai/app/invocations/**'
|
||||||
|
|
||||||
|
Backend:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'invokeai/backend/**'
|
||||||
|
|
||||||
|
Api:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'invokeai/app/api/**'
|
||||||
|
|
||||||
|
Services:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'invokeai/app/services/**'
|
||||||
|
|
||||||
|
FrontendDeps:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- '**/*/package.json'
|
||||||
|
- '**/*/pnpm-lock.yaml'
|
||||||
|
|
||||||
|
Frontend:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'invokeai/frontend/web/**'
|
16
.github/workflows/label-pr.yml
vendored
Normal file
16
.github/workflows/label-pr.yml
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
name: "Pull Request Labeler"
|
||||||
|
on:
|
||||||
|
- pull_request_target
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
labeler:
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
pull-requests: write
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: actions/labeler@v5
|
||||||
|
with:
|
||||||
|
configuration-path: .github/pr_labels.yml
|
@ -169,7 +169,7 @@ the command `npm install -g pnpm` if needed)
|
|||||||
_For Linux with an AMD GPU:_
|
_For Linux with an AMD GPU:_
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
_For non-GPU systems:_
|
_For non-GPU systems:_
|
||||||
|
@ -1,76 +0,0 @@
|
|||||||
# Contributing to the Frontend
|
|
||||||
|
|
||||||
# InvokeAI Web UI
|
|
||||||
|
|
||||||
- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
|
|
||||||
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
|
|
||||||
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
|
|
||||||
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
|
|
||||||
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)
|
|
||||||
|
|
||||||
The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.
|
|
||||||
|
|
||||||
Code is located in `invokeai/frontend/web/` for review.
|
|
||||||
|
|
||||||
## Stack
|
|
||||||
|
|
||||||
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
|
|
||||||
|
|
||||||
- `createAsyncThunk` for HTTP requests
|
|
||||||
- `createEntityAdapter` for fetching images and models
|
|
||||||
- `createListenerMiddleware` for workflows
|
|
||||||
|
|
||||||
The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
|
|
||||||
|
|
||||||
Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
|
|
||||||
|
|
||||||
[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.
|
|
||||||
|
|
||||||
[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
|
|
||||||
|
|
||||||
[Vite](https://vitejs.dev/) for bundling.
|
|
||||||
|
|
||||||
Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Thanks for your interest in contributing to the InvokeAI Web UI!
|
|
||||||
|
|
||||||
We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
|
|
||||||
|
|
||||||
### Dev Environment
|
|
||||||
|
|
||||||
**Setup**
|
|
||||||
|
|
||||||
1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
|
|
||||||
```bash
|
|
||||||
node --version
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Install [pnpm](https://pnpm.io/) and confirm it is installed by running this:
|
|
||||||
```bash
|
|
||||||
npm install --global pnpm
|
|
||||||
pnpm --version
|
|
||||||
```
|
|
||||||
|
|
||||||
From `invokeai/frontend/web/` run `pnpm install` to get everything set up.
|
|
||||||
|
|
||||||
Start everything in dev mode:
|
|
||||||
1. Ensure your virtual environment is running
|
|
||||||
2. Start the dev server: `pnpm dev`
|
|
||||||
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
|
|
||||||
4. Point your browser to the dev server address e.g. [http://localhost:5173/](http://localhost:5173/)
|
|
||||||
|
|
||||||
### VSCode Remote Dev
|
|
||||||
|
|
||||||
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. Suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
|
|
||||||
|
|
||||||
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
|
|
||||||
|
|
||||||
### Production builds
|
|
||||||
|
|
||||||
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
|
|
||||||
|
|
||||||
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
|
|
||||||
|
|
||||||
To build for production, run `pnpm build`.
|
|
@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
|
|||||||
Once you're setup, for more information, you can review the documentation specific to your area of interest:
|
Once you're setup, for more information, you can review the documentation specific to your area of interest:
|
||||||
|
|
||||||
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
|
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
|
||||||
* #### [Frontend Documentation](./contributingToFrontend.md)
|
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
|
||||||
* #### [Node Documentation](../INVOCATIONS.md)
|
* #### [Node Documentation](../INVOCATIONS.md)
|
||||||
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
|
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
|
||||||
|
|
||||||
|
BIN
docs/img/favicon.ico
Normal file
BIN
docs/img/favicon.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 4.2 KiB |
@ -117,6 +117,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
|||||||
|
|
||||||
## :octicons-gift-24: InvokeAI Features
|
## :octicons-gift-24: InvokeAI Features
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
- [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||||
|
- [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||||
|
- [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||||
|
|
||||||
### The InvokeAI Web Interface
|
### The InvokeAI Web Interface
|
||||||
- [WebUI overview](features/WEB.md)
|
- [WebUI overview](features/WEB.md)
|
||||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||||
|
@ -477,7 +477,7 @@ Then type the following commands:
|
|||||||
|
|
||||||
=== "AMD System"
|
=== "AMD System"
|
||||||
```bash
|
```bash
|
||||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
### Corrupted configuration file
|
### Corrupted configuration file
|
||||||
|
@ -154,7 +154,7 @@ manager, please follow these steps:
|
|||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
@ -313,7 +313,7 @@ code for InvokeAI. For this to work, you will need to install the
|
|||||||
on your system, please see the [Git Installation
|
on your system, please see the [Git Installation
|
||||||
Guide](https://github.com/git-guides/install-git)
|
Guide](https://github.com/git-guides/install-git)
|
||||||
|
|
||||||
You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md).
|
You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md).
|
||||||
|
|
||||||
If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation, else the two may interfere.
|
If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation, else the two may interfere.
|
||||||
|
|
||||||
@ -345,7 +345,7 @@ installation protocol (important!)
|
|||||||
|
|
||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
```bash
|
```bash
|
||||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
@ -361,7 +361,7 @@ installation protocol (important!)
|
|||||||
Be sure to pass `-e` (for an editable install) and don't forget the
|
Be sure to pass `-e` (for an editable install) and don't forget the
|
||||||
dot ("."). It is part of the command.
|
dot ("."). It is part of the command.
|
||||||
|
|
||||||
5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md) and do a production build of the UI as described.
|
5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md) and do a production build of the UI as described.
|
||||||
|
|
||||||
6. You can now run `invokeai` and its related commands. The code will be
|
6. You can now run `invokeai` and its related commands. The code will be
|
||||||
read from the repository, so that you can edit the .py source files
|
read from the repository, so that you can edit the .py source files
|
||||||
|
@ -134,7 +134,7 @@ recipes are available
|
|||||||
|
|
||||||
When installing torch and torchvision manually with `pip`, remember to provide
|
When installing torch and torchvision manually with `pip`, remember to provide
|
||||||
the argument `--extra-index-url
|
the argument `--extra-index-url
|
||||||
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
|
https://download.pytorch.org/whl/rocm5.6` as described in the [Manual
|
||||||
Installation Guide](020_INSTALL_MANUAL.md).
|
Installation Guide](020_INSTALL_MANUAL.md).
|
||||||
|
|
||||||
This will be done automatically for you if you use the installer
|
This will be done automatically for you if you use the installer
|
||||||
|
@ -18,13 +18,18 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
|||||||
driver).
|
driver).
|
||||||
|
|
||||||
|
|
||||||
## **[Automated Installer](010_INSTALL_AUTOMATED.md)**
|
## **[Automated Installer (Recommended)](010_INSTALL_AUTOMATED.md)**
|
||||||
✅ This is the recommended installation method for first-time users.
|
✅ This is the recommended installation method for first-time users.
|
||||||
|
|
||||||
This is a script that will install all of InvokeAI's essential
|
This is a script that will install all of InvokeAI's essential
|
||||||
third party libraries and InvokeAI itself. It includes access to a
|
third party libraries and InvokeAI itself.
|
||||||
"developer console" which will help us debug problems with you and
|
|
||||||
give you to access experimental features.
|
🖥️ **Download the latest installer .zip file here** : https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||||
|
|
||||||
|
- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page*
|
||||||
|
- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](discord.gg/invoke-ai) or create an issue on [Github](https://github.com/invoke-ai/InvokeAI).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## **[Manual Installation](020_INSTALL_MANUAL.md)**
|
## **[Manual Installation](020_INSTALL_MANUAL.md)**
|
||||||
This method is recommended for experienced users and developers.
|
This method is recommended for experienced users and developers.
|
||||||
|
@ -25,6 +25,7 @@ To use a community workflow, download the the `.json` node graph file and load i
|
|||||||
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
||||||
+ [Grid to Gif](#grid-to-gif)
|
+ [Grid to Gif](#grid-to-gif)
|
||||||
+ [Halftone](#halftone)
|
+ [Halftone](#halftone)
|
||||||
|
+ [Hand Refiner with MeshGraphormer](#hand-refiner-with-meshgraphormer)
|
||||||
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
|
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
|
||||||
+ [Image Dominant Color](#image-dominant-color)
|
+ [Image Dominant Color](#image-dominant-color)
|
||||||
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
|
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
|
||||||
@ -196,6 +197,18 @@ CMYK Halftone Output:
|
|||||||
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
|
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
|
||||||
|
|
||||||
--------------------------------
|
--------------------------------
|
||||||
|
|
||||||
|
### Hand Refiner with MeshGraphormer
|
||||||
|
|
||||||
|
**Description**: Hand Refiner takes in your image and automatically generates a fixed depth map for the hands along with a mask of the hands region that will conveniently allow you to use them along with ControlNet to fix the wonky hands generated by Stable Diffusion
|
||||||
|
|
||||||
|
**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer
|
||||||
|
|
||||||
|
**View**
|
||||||
|
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_meshgraphormer/main/assets/preview.jpg" />
|
||||||
|
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
### Image and Mask Composition Pack
|
### Image and Mask Composition Pack
|
||||||
|
|
||||||
**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.
|
**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.
|
||||||
|
@ -13,46 +13,69 @@ We thank them for all of their time and hard work.
|
|||||||
|
|
||||||
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
||||||
|
|
||||||
## **Current core team**
|
## **Current Core Team**
|
||||||
|
|
||||||
* @lstein (Lincoln Stein) - Co-maintainer
|
* @lstein (Lincoln Stein) - Co-maintainer
|
||||||
* @blessedcoolant - Co-maintainer
|
* @blessedcoolant - Co-maintainer
|
||||||
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
|
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
|
||||||
* @psychedelicious (Spencer Mabrito) - Web Team Leader
|
* @psychedelicious (Spencer Mabrito) - Web Team Leader
|
||||||
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
|
||||||
* @damian0815 - Attention Systems and Compel Maintainer
|
* @josh is toast (Josh Corbett) - Web Development
|
||||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
|
||||||
* @genomancer (Gregg Helt) - Controlnet support
|
|
||||||
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
|
||||||
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
||||||
|
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||||
|
* @sunija - Standalone version
|
||||||
|
* @genomancer (Gregg Helt) - Controlnet support
|
||||||
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
||||||
* @ryanjdick (Ryan Dick) - Machine Learning & Training
|
* @ryanjdick (Ryan Dick) - Machine Learning & Training
|
||||||
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
|
* @JPPhoto - Core image generation nodes
|
||||||
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
|
* @dunkeroni - Image generation backend
|
||||||
|
* @SkunkWorxDark - Image generation backend
|
||||||
* @keturn (Kevin Turner) - Diffusers
|
* @keturn (Kevin Turner) - Diffusers
|
||||||
|
* @millu (Millun Atluri) - Community Wizard, Documentation, Node-wrangler,
|
||||||
|
* @glimmerleaf (Devon Hopkins) - Community Wizard
|
||||||
* @gogurt enjoyer - Discord moderator and end user support
|
* @gogurt enjoyer - Discord moderator and end user support
|
||||||
* @whosawhatsis - Discord moderator and end user support
|
* @whosawhatsis - Discord moderator and end user support
|
||||||
* @dwinrger - Discord moderator and end user support
|
* @dwinrger - Discord moderator and end user support
|
||||||
* @526christian - Discord moderator and end user support
|
* @526christian - Discord moderator and end user support
|
||||||
|
* @harvester62 - Discord moderator and end user support
|
||||||
|
|
||||||
|
|
||||||
|
## **Honored Team Alumni**
|
||||||
|
|
||||||
|
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
||||||
|
* @damian0815 - Attention Systems and Compel Maintainer
|
||||||
|
* @netsvetaev (Artur) - Localization support
|
||||||
|
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
||||||
|
* @tildebyte - Installation and configuration
|
||||||
|
* @mauwii (Matthias Wilde) - Installation, release, continuous integration
|
||||||
|
|
||||||
|
|
||||||
## **Full List of Contributors by Commit Name**
|
## **Full List of Contributors by Commit Name**
|
||||||
|
|
||||||
|
- 이승석
|
||||||
- AbdBarho
|
- AbdBarho
|
||||||
- ablattmann
|
- ablattmann
|
||||||
- AdamOStark
|
- AdamOStark
|
||||||
- Adam Rice
|
- Adam Rice
|
||||||
- Airton Silva
|
- Airton Silva
|
||||||
|
- Aldo Hoeben
|
||||||
- Alexander Eichhorn
|
- Alexander Eichhorn
|
||||||
- Alexandre D. Roberge
|
- Alexandre D. Roberge
|
||||||
|
- Alexandre Macabies
|
||||||
|
- Alfie John
|
||||||
- Andreas Rozek
|
- Andreas Rozek
|
||||||
- Andre LaBranche
|
- Andre LaBranche
|
||||||
- Andy Bearman
|
- Andy Bearman
|
||||||
- Andy Luhrs
|
- Andy Luhrs
|
||||||
- Andy Pilate
|
- Andy Pilate
|
||||||
|
- Anonymous
|
||||||
|
- Anthony Monthe
|
||||||
- Any-Winter-4079
|
- Any-Winter-4079
|
||||||
- apolinario
|
- apolinario
|
||||||
|
- Ar7ific1al
|
||||||
- ArDiouscuros
|
- ArDiouscuros
|
||||||
- Armando C. Santisbon
|
- Armando C. Santisbon
|
||||||
|
- Arnold Cordewiner
|
||||||
- Arthur Holstvoogd
|
- Arthur Holstvoogd
|
||||||
- artmen1516
|
- artmen1516
|
||||||
- Artur
|
- Artur
|
||||||
@ -64,13 +87,16 @@ We thank them for all of their time and hard work.
|
|||||||
- blhook
|
- blhook
|
||||||
- BlueAmulet
|
- BlueAmulet
|
||||||
- Bouncyknighter
|
- Bouncyknighter
|
||||||
|
- Brandon
|
||||||
- Brandon Rising
|
- Brandon Rising
|
||||||
- Brent Ozar
|
- Brent Ozar
|
||||||
- Brian Racer
|
- Brian Racer
|
||||||
- bsilvereagle
|
- bsilvereagle
|
||||||
- c67e708d
|
- c67e708d
|
||||||
|
- camenduru
|
||||||
- CapableWeb
|
- CapableWeb
|
||||||
- Carson Katri
|
- Carson Katri
|
||||||
|
- chainchompa
|
||||||
- Chloe
|
- Chloe
|
||||||
- Chris Dawson
|
- Chris Dawson
|
||||||
- Chris Hayes
|
- Chris Hayes
|
||||||
@ -86,30 +112,45 @@ We thank them for all of their time and hard work.
|
|||||||
- cpacker
|
- cpacker
|
||||||
- Cragin Godley
|
- Cragin Godley
|
||||||
- creachec
|
- creachec
|
||||||
|
- CrypticWit
|
||||||
|
- d8ahazard
|
||||||
|
- damian
|
||||||
|
- damian0815
|
||||||
|
- Damian at mba
|
||||||
- Damian Stewart
|
- Damian Stewart
|
||||||
- Daniel Manzke
|
- Daniel Manzke
|
||||||
- Danny Beer
|
- Danny Beer
|
||||||
- Dan Sully
|
- Dan Sully
|
||||||
|
- Darren Ringer
|
||||||
- David Burnett
|
- David Burnett
|
||||||
- David Ford
|
- David Ford
|
||||||
- David Regla
|
- David Regla
|
||||||
|
- David Sisco
|
||||||
- David Wager
|
- David Wager
|
||||||
- Daya Adianto
|
- Daya Adianto
|
||||||
- db3000
|
- db3000
|
||||||
|
- DekitaRPG
|
||||||
- Denis Olshin
|
- Denis Olshin
|
||||||
- Dennis
|
- Dennis
|
||||||
|
- dependabot[bot]
|
||||||
|
- Dmitry Parnas
|
||||||
|
- Dobrynia100
|
||||||
- Dominic Letz
|
- Dominic Letz
|
||||||
- DrGunnarMallon
|
- DrGunnarMallon
|
||||||
|
- Drun555
|
||||||
|
- dunkeroni
|
||||||
- Edward Johan
|
- Edward Johan
|
||||||
- elliotsayes
|
- elliotsayes
|
||||||
- Elrik
|
- Elrik
|
||||||
- ElrikUnderlake
|
- ElrikUnderlake
|
||||||
- Eric Khun
|
- Eric Khun
|
||||||
- Eric Wolf
|
- Eric Wolf
|
||||||
|
- Eugene
|
||||||
- Eugene Brodsky
|
- Eugene Brodsky
|
||||||
- ExperimentalCyborg
|
- ExperimentalCyborg
|
||||||
- Fabian Bahl
|
- Fabian Bahl
|
||||||
- Fabio 'MrWHO' Torchetti
|
- Fabio 'MrWHO' Torchetti
|
||||||
|
- Fattire
|
||||||
- fattire
|
- fattire
|
||||||
- Felipe Nogueira
|
- Felipe Nogueira
|
||||||
- Félix Sanz
|
- Félix Sanz
|
||||||
@ -118,8 +159,12 @@ We thank them for all of their time and hard work.
|
|||||||
- gabrielrotbart
|
- gabrielrotbart
|
||||||
- gallegonovato
|
- gallegonovato
|
||||||
- Gérald LONLAS
|
- Gérald LONLAS
|
||||||
|
- Gille
|
||||||
- GitHub Actions Bot
|
- GitHub Actions Bot
|
||||||
|
- glibesyck
|
||||||
- gogurtenjoyer
|
- gogurtenjoyer
|
||||||
|
- Gohsuke Shimada
|
||||||
|
- greatwolf
|
||||||
- greentext2
|
- greentext2
|
||||||
- Gregg Helt
|
- Gregg Helt
|
||||||
- H4rk
|
- H4rk
|
||||||
@ -131,6 +176,7 @@ We thank them for all of their time and hard work.
|
|||||||
- Hosted Weblate
|
- Hosted Weblate
|
||||||
- Iman Karim
|
- Iman Karim
|
||||||
- ismail ihsan bülbül
|
- ismail ihsan bülbül
|
||||||
|
- ItzAttila
|
||||||
- Ivan Efimov
|
- Ivan Efimov
|
||||||
- jakehl
|
- jakehl
|
||||||
- Jakub Kolčář
|
- Jakub Kolčář
|
||||||
@ -141,6 +187,7 @@ We thank them for all of their time and hard work.
|
|||||||
- Jason Toffaletti
|
- Jason Toffaletti
|
||||||
- Jaulustus
|
- Jaulustus
|
||||||
- Jeff Mahoney
|
- Jeff Mahoney
|
||||||
|
- Jennifer Player
|
||||||
- jeremy
|
- jeremy
|
||||||
- Jeremy Clark
|
- Jeremy Clark
|
||||||
- JigenD
|
- JigenD
|
||||||
@ -148,19 +195,26 @@ We thank them for all of their time and hard work.
|
|||||||
- Johan Roxendal
|
- Johan Roxendal
|
||||||
- Johnathon Selstad
|
- Johnathon Selstad
|
||||||
- Jonathan
|
- Jonathan
|
||||||
|
- Jordan Hewitt
|
||||||
- Joseph Dries III
|
- Joseph Dries III
|
||||||
|
- Josh Corbett
|
||||||
- JPPhoto
|
- JPPhoto
|
||||||
- jspraul
|
- jspraul
|
||||||
|
- junzi
|
||||||
- Justin Wong
|
- Justin Wong
|
||||||
- Juuso V
|
- Juuso V
|
||||||
- Kaspar Emanuel
|
- Kaspar Emanuel
|
||||||
- Katsuyuki-Karasawa
|
- Katsuyuki-Karasawa
|
||||||
|
- Keerigan45
|
||||||
- Kent Keirsey
|
- Kent Keirsey
|
||||||
|
- Kevin Brack
|
||||||
- Kevin Coakley
|
- Kevin Coakley
|
||||||
- Kevin Gibbons
|
- Kevin Gibbons
|
||||||
- Kevin Schaul
|
- Kevin Schaul
|
||||||
- Kevin Turner
|
- Kevin Turner
|
||||||
|
- Kieran Klaassen
|
||||||
- krummrey
|
- krummrey
|
||||||
|
- Kyle
|
||||||
- Kyle Lacy
|
- Kyle Lacy
|
||||||
- Kyle Schouviller
|
- Kyle Schouviller
|
||||||
- Lawrence Norton
|
- Lawrence Norton
|
||||||
@ -171,10 +225,15 @@ We thank them for all of their time and hard work.
|
|||||||
- Lynne Whitehorn
|
- Lynne Whitehorn
|
||||||
- majick
|
- majick
|
||||||
- Marco Labarile
|
- Marco Labarile
|
||||||
|
- Marta Nahorniuk
|
||||||
- Martin Kristiansen
|
- Martin Kristiansen
|
||||||
|
- Mary Hipp
|
||||||
|
- maryhipp
|
||||||
- Mary Hipp Rogers
|
- Mary Hipp Rogers
|
||||||
|
- mastercaster
|
||||||
- mastercaster9000
|
- mastercaster9000
|
||||||
- Matthias Wild
|
- Matthias Wild
|
||||||
|
- mauwii
|
||||||
- michaelk71
|
- michaelk71
|
||||||
- mickr777
|
- mickr777
|
||||||
- Mihai
|
- Mihai
|
||||||
@ -182,11 +241,15 @@ We thank them for all of their time and hard work.
|
|||||||
- Mikhail Tishin
|
- Mikhail Tishin
|
||||||
- Millun Atluri
|
- Millun Atluri
|
||||||
- Minjune Song
|
- Minjune Song
|
||||||
|
- Mitchell Allain
|
||||||
- mitien
|
- mitien
|
||||||
- mofuzz
|
- mofuzz
|
||||||
- Muhammad Usama
|
- Muhammad Usama
|
||||||
- Name
|
- Name
|
||||||
- _nderscore
|
- _nderscore
|
||||||
|
- Neil Wang
|
||||||
|
- nekowaiz
|
||||||
|
- nemuruibai
|
||||||
- Netzer R
|
- Netzer R
|
||||||
- Nicholas Koh
|
- Nicholas Koh
|
||||||
- Nicholas Körfer
|
- Nicholas Körfer
|
||||||
@ -197,9 +260,11 @@ We thank them for all of their time and hard work.
|
|||||||
- ofirkris
|
- ofirkris
|
||||||
- Olivier Louvignes
|
- Olivier Louvignes
|
||||||
- owenvincent
|
- owenvincent
|
||||||
|
- pand4z31
|
||||||
- Patrick Esser
|
- Patrick Esser
|
||||||
- Patrick Tien
|
- Patrick Tien
|
||||||
- Patrick von Platen
|
- Patrick von Platen
|
||||||
|
- Paul Curry
|
||||||
- Paul Sajna
|
- Paul Sajna
|
||||||
- pejotr
|
- pejotr
|
||||||
- Peter Baylies
|
- Peter Baylies
|
||||||
@ -207,6 +272,7 @@ We thank them for all of their time and hard work.
|
|||||||
- plucked
|
- plucked
|
||||||
- prixt
|
- prixt
|
||||||
- psychedelicious
|
- psychedelicious
|
||||||
|
- psychedelicious@windows
|
||||||
- Rainer Bernhardt
|
- Rainer Bernhardt
|
||||||
- Riccardo Giovanetti
|
- Riccardo Giovanetti
|
||||||
- Rich Jones
|
- Rich Jones
|
||||||
@ -215,17 +281,22 @@ We thank them for all of their time and hard work.
|
|||||||
- Robert Bolender
|
- Robert Bolender
|
||||||
- Robin Rombach
|
- Robin Rombach
|
||||||
- Rohan Barar
|
- Rohan Barar
|
||||||
- rohinish404
|
- Rohinish
|
||||||
- rpagliuca
|
- rpagliuca
|
||||||
- rromb
|
- rromb
|
||||||
- Rupesh Sreeraman
|
- Rupesh Sreeraman
|
||||||
|
- Ryan
|
||||||
- Ryan Cao
|
- Ryan Cao
|
||||||
|
- Ryan Dick
|
||||||
- Saifeddine
|
- Saifeddine
|
||||||
- Saifeddine ALOUI
|
- Saifeddine ALOUI
|
||||||
|
- Sam
|
||||||
- SammCheese
|
- SammCheese
|
||||||
|
- Sam McLeod
|
||||||
- Sammy
|
- Sammy
|
||||||
- sammyf
|
- sammyf
|
||||||
- Samuel Husso
|
- Samuel Husso
|
||||||
|
- Saurav Maheshkar
|
||||||
- Scott Lahteine
|
- Scott Lahteine
|
||||||
- Sean McLellan
|
- Sean McLellan
|
||||||
- Sebastian Aigner
|
- Sebastian Aigner
|
||||||
@ -233,16 +304,21 @@ We thank them for all of their time and hard work.
|
|||||||
- Sergey Krashevich
|
- Sergey Krashevich
|
||||||
- Shapor Naghibzadeh
|
- Shapor Naghibzadeh
|
||||||
- Shawn Zhong
|
- Shawn Zhong
|
||||||
|
- Simona Liliac
|
||||||
- Simon Vans-Colina
|
- Simon Vans-Colina
|
||||||
- skunkworxdark
|
- skunkworxdark
|
||||||
- slashtechno
|
- slashtechno
|
||||||
|
- SoheilRezaei
|
||||||
|
- Song, Pengcheng
|
||||||
- spezialspezial
|
- spezialspezial
|
||||||
- ssantos
|
- ssantos
|
||||||
- StAlKeR7779
|
- StAlKeR7779
|
||||||
|
- Stefan Tobler
|
||||||
- Stephan Koglin-Fischer
|
- Stephan Koglin-Fischer
|
||||||
- SteveCaruso
|
- SteveCaruso
|
||||||
- Steve Martinelli
|
- Steve Martinelli
|
||||||
- Steven Frank
|
- Steven Frank
|
||||||
|
- Surisen
|
||||||
- System X - Files
|
- System X - Files
|
||||||
- Taylor Kems
|
- Taylor Kems
|
||||||
- techicode
|
- techicode
|
||||||
@ -261,26 +337,34 @@ We thank them for all of their time and hard work.
|
|||||||
- tyler
|
- tyler
|
||||||
- unknown
|
- unknown
|
||||||
- user1
|
- user1
|
||||||
|
- vedant-3010
|
||||||
- Vedant Madane
|
- Vedant Madane
|
||||||
- veprogames
|
- veprogames
|
||||||
- wa.code
|
- wa.code
|
||||||
- wfng92
|
- wfng92
|
||||||
|
- whjms
|
||||||
- whosawhatsis
|
- whosawhatsis
|
||||||
- Will
|
- Will
|
||||||
- William Becher
|
- William Becher
|
||||||
- William Chong
|
- William Chong
|
||||||
|
- Wilson E. Alvarez
|
||||||
|
- woweenie
|
||||||
|
- Wubbbi
|
||||||
- xra
|
- xra
|
||||||
- Yeung Yiu Hung
|
- Yeung Yiu Hung
|
||||||
- ymgenesis
|
- ymgenesis
|
||||||
- Yorzaren
|
- Yorzaren
|
||||||
- Yosuke Shinya
|
- Yosuke Shinya
|
||||||
- yun saki
|
- yun saki
|
||||||
|
- ZachNagengast
|
||||||
- Zadagu
|
- Zadagu
|
||||||
- zeptofine
|
- zeptofine
|
||||||
|
- Zerdoumi
|
||||||
|
- Васянатор
|
||||||
- 冯不游
|
- 冯不游
|
||||||
- 唐澤 克幸
|
- 唐澤 克幸
|
||||||
|
|
||||||
## **Original CompVis Authors**
|
## **Original CompVis (Stable Diffusion) Authors**
|
||||||
|
|
||||||
- [Robin Rombach](https://github.com/rromb)
|
- [Robin Rombach](https://github.com/rromb)
|
||||||
- [Patrick von Platen](https://github.com/patrickvonplaten)
|
- [Patrick von Platen](https://github.com/patrickvonplaten)
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -455,7 +455,7 @@ def get_torch_source() -> (Union[str, None], str):
|
|||||||
optional_modules = "[onnx]"
|
optional_modules = "[onnx]"
|
||||||
if OS == "Linux":
|
if OS == "Linux":
|
||||||
if device == "rocm":
|
if device == "rocm":
|
||||||
url = "https://download.pytorch.org/whl/rocm5.4.2"
|
url = "https://download.pytorch.org/whl/rocm5.6"
|
||||||
elif device == "cpu":
|
elif device == "cpu":
|
||||||
url = "https://download.pytorch.org/whl/cpu"
|
url = "https://download.pytorch.org/whl/cpu"
|
||||||
|
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
from logging import Logger
|
from logging import Logger
|
||||||
|
|
||||||
|
from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
|
||||||
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
||||||
from invokeai.backend.model_manager.metadata import ModelMetadataStore
|
from invokeai.backend.model_manager.metadata import ModelMetadataStore
|
||||||
from invokeai.backend.util.logging import InvokeAILogger
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
@ -22,7 +23,6 @@ from ..services.invocation_queue.invocation_queue_memory import MemoryInvocation
|
|||||||
from ..services.invocation_services import InvocationServices
|
from ..services.invocation_services import InvocationServices
|
||||||
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
|
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
|
||||||
from ..services.invoker import Invoker
|
from ..services.invoker import Invoker
|
||||||
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
|
|
||||||
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
|
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
|
||||||
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
|
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
|
||||||
from ..services.model_install import ModelInstallService
|
from ..services.model_install import ModelInstallService
|
||||||
@ -80,7 +80,7 @@ class ApiDependencies:
|
|||||||
board_records = SqliteBoardRecordStorage(db=db)
|
board_records = SqliteBoardRecordStorage(db=db)
|
||||||
boards = BoardService()
|
boards = BoardService()
|
||||||
events = FastAPIEventService(event_handler_id)
|
events = FastAPIEventService(event_handler_id)
|
||||||
graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
|
graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
|
||||||
image_records = SqliteImageRecordStorage(db=db)
|
image_records = SqliteImageRecordStorage(db=db)
|
||||||
images = ImageService()
|
images = ImageService()
|
||||||
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
|
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
# Copyright (c) 2023 Lincoln D. Stein
|
# Copyright (c) 2023 Lincoln D. Stein
|
||||||
"""FastAPI route for model configuration records."""
|
"""FastAPI route for model configuration records."""
|
||||||
|
|
||||||
|
import pathlib
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
from random import randbytes
|
from random import randbytes
|
||||||
from typing import Any, Dict, List, Optional, Set
|
from typing import Any, Dict, List, Optional, Set
|
||||||
@ -27,6 +27,7 @@ from invokeai.backend.model_manager.config import (
|
|||||||
ModelFormat,
|
ModelFormat,
|
||||||
ModelType,
|
ModelType,
|
||||||
)
|
)
|
||||||
|
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger
|
||||||
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
||||||
|
|
||||||
from ..dependencies import ApiDependencies
|
from ..dependencies import ApiDependencies
|
||||||
@ -415,3 +416,57 @@ async def sync_models_to_config() -> Response:
|
|||||||
"""
|
"""
|
||||||
ApiDependencies.invoker.services.model_install.sync_to_config()
|
ApiDependencies.invoker.services.model_install.sync_to_config()
|
||||||
return Response(status_code=204)
|
return Response(status_code=204)
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.put(
|
||||||
|
"/merge",
|
||||||
|
operation_id="merge",
|
||||||
|
)
|
||||||
|
async def merge(
|
||||||
|
keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3),
|
||||||
|
merged_model_name: Optional[str] = Body(description="Name of destination model", default=None),
|
||||||
|
alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
|
||||||
|
force: bool = Body(
|
||||||
|
description="Force merging of models created with different versions of diffusers",
|
||||||
|
default=False,
|
||||||
|
),
|
||||||
|
interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None),
|
||||||
|
merge_dest_directory: Optional[str] = Body(
|
||||||
|
description="Save the merged model to the designated directory (with 'merged_model_name' appended)",
|
||||||
|
default=None,
|
||||||
|
),
|
||||||
|
) -> AnyModelConfig:
|
||||||
|
"""
|
||||||
|
Merge diffusers models.
|
||||||
|
|
||||||
|
keys: List of 2-3 model keys to merge together. All models must use the same base type.
|
||||||
|
merged_model_name: Name for the merged model [Concat model names]
|
||||||
|
alpha: Alpha value (0.0-1.0). Higher values give more weight to the second model [0.5]
|
||||||
|
force: If true, force the merge even if the models were generated by different versions of the diffusers library [False]
|
||||||
|
interp: Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum]
|
||||||
|
merge_dest_directory: Specify a directory to store the merged model in [models directory]
|
||||||
|
"""
|
||||||
|
print(f"here i am, keys={keys}")
|
||||||
|
logger = ApiDependencies.invoker.services.logger
|
||||||
|
try:
|
||||||
|
logger.info(f"Merging models: {keys} into {merge_dest_directory or '<MODELS>'}/{merged_model_name}")
|
||||||
|
dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None
|
||||||
|
installer = ApiDependencies.invoker.services.model_install
|
||||||
|
merger = ModelMerger(installer)
|
||||||
|
model_names = [installer.record_store.get_model(x).name for x in keys]
|
||||||
|
response = merger.merge_diffusion_models_and_save(
|
||||||
|
model_keys=keys,
|
||||||
|
merged_model_name=merged_model_name or "+".join(model_names),
|
||||||
|
alpha=alpha,
|
||||||
|
interp=interp,
|
||||||
|
force=force,
|
||||||
|
merge_dest_directory=dest,
|
||||||
|
)
|
||||||
|
except UnknownModelException:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=404,
|
||||||
|
detail=f"One or more of the models '{keys}' not found",
|
||||||
|
)
|
||||||
|
except ValueError as e:
|
||||||
|
raise HTTPException(status_code=400, detail=str(e))
|
||||||
|
return response
|
||||||
|
@ -30,6 +30,7 @@ from invokeai.app.invocations.primitives import ImageField, ImageOutput
|
|||||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
||||||
from invokeai.app.shared.fields import FieldDescriptions
|
from invokeai.app.shared.fields import FieldDescriptions
|
||||||
|
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
|
||||||
|
|
||||||
from ...backend.model_management import BaseModelType
|
from ...backend.model_management import BaseModelType
|
||||||
from .baseinvocation import (
|
from .baseinvocation import (
|
||||||
@ -602,3 +603,33 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
|||||||
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
|
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
|
||||||
color_map = Image.fromarray(color_map)
|
color_map = Image.fromarray(color_map)
|
||||||
return color_map
|
return color_map
|
||||||
|
|
||||||
|
|
||||||
|
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
|
||||||
|
|
||||||
|
|
||||||
|
@invocation(
|
||||||
|
"depth_anything_image_processor",
|
||||||
|
title="Depth Anything Processor",
|
||||||
|
tags=["controlnet", "depth", "depth anything"],
|
||||||
|
category="controlnet",
|
||||||
|
version="1.0.0",
|
||||||
|
)
|
||||||
|
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||||
|
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||||
|
|
||||||
|
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
|
||||||
|
default="small", description="The size of the depth model to use"
|
||||||
|
)
|
||||||
|
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
|
||||||
|
offload: bool = InputField(default=False)
|
||||||
|
|
||||||
|
def run_processor(self, image):
|
||||||
|
depth_anything_detector = DepthAnythingDetector()
|
||||||
|
depth_anything_detector.load_model(model_size=self.model_size)
|
||||||
|
|
||||||
|
if image.mode == "RGBA":
|
||||||
|
image = image.convert("RGB")
|
||||||
|
|
||||||
|
processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
|
||||||
|
return processed_image
|
||||||
|
@ -251,7 +251,11 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
|
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
|
||||||
log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)
|
log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)
|
||||||
|
|
||||||
|
# Development
|
||||||
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
|
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
|
||||||
|
profile_graphs : bool = Field(default=False, description="Enable graph profiling", json_schema_extra=Categories.Development)
|
||||||
|
profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development)
|
||||||
|
profiles_dir : Path = Field(default=Path('profiles'), description="Directory for graph profiles", json_schema_extra=Categories.Development)
|
||||||
|
|
||||||
version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
|
version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
|
||||||
|
|
||||||
@ -270,7 +274,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
|
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
|
||||||
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
|
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
|
||||||
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
|
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
|
||||||
png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
|
png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
|
||||||
|
|
||||||
# QUEUE
|
# QUEUE
|
||||||
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
|
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
|
||||||
@ -280,6 +284,9 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
|
deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
|
||||||
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)
|
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)
|
||||||
|
|
||||||
|
# MODEL IMPORT
|
||||||
|
civitai_api_key : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)
|
||||||
|
|
||||||
# DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
|
# DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
|
||||||
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
|
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
|
||||||
max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
|
max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
|
||||||
@ -289,6 +296,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
|
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||||
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||||
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||||
|
|
||||||
# this is not referred to in the source code and can be removed entirely
|
# this is not referred to in the source code and can be removed entirely
|
||||||
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
|
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
|
||||||
|
|
||||||
@ -449,6 +457,11 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
disabled_in_config = not self.xformers_enabled
|
disabled_in_config = not self.xformers_enabled
|
||||||
return disabled_in_config and self.attention_type != "xformers"
|
return disabled_in_config and self.attention_type != "xformers"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def profiles_path(self) -> Path:
|
||||||
|
"""Path to the graph profiles directory."""
|
||||||
|
return self._resolve(self.profiles_dir)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def find_root() -> Path:
|
def find_root() -> Path:
|
||||||
"""Choose the runtime root directory when not specified on command line or init file."""
|
"""Choose the runtime root directory when not specified on command line or init file."""
|
||||||
|
@ -208,7 +208,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
job = self._queue.get(timeout=1)
|
job = self._queue.get(timeout=1)
|
||||||
except Empty:
|
except Empty:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
try:
|
try:
|
||||||
job.job_started = get_iso_timestamp()
|
job.job_started = get_iso_timestamp()
|
||||||
self._do_download(job)
|
self._do_download(job)
|
||||||
|
@ -1,11 +1,16 @@
|
|||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
from contextlib import suppress
|
||||||
from threading import BoundedSemaphore, Event, Thread
|
from threading import BoundedSemaphore, Event, Thread
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
import invokeai.backend.util.logging as logger
|
import invokeai.backend.util.logging as logger
|
||||||
from invokeai.app.invocations.baseinvocation import InvocationContext
|
from invokeai.app.invocations.baseinvocation import InvocationContext
|
||||||
from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
|
from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
|
||||||
|
from invokeai.app.services.invocation_stats.invocation_stats_common import (
|
||||||
|
GESStatsNotFoundError,
|
||||||
|
)
|
||||||
|
from invokeai.app.util.profiler import Profiler
|
||||||
|
|
||||||
from ..invoker import Invoker
|
from ..invoker import Invoker
|
||||||
from .invocation_processor_base import InvocationProcessorABC
|
from .invocation_processor_base import InvocationProcessorABC
|
||||||
@ -18,7 +23,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
__invoker: Invoker
|
__invoker: Invoker
|
||||||
__threadLimit: BoundedSemaphore
|
__threadLimit: BoundedSemaphore
|
||||||
|
|
||||||
def start(self, invoker) -> None:
|
def start(self, invoker: Invoker) -> None:
|
||||||
# if we do want multithreading at some point, we could make this configurable
|
# if we do want multithreading at some point, we could make this configurable
|
||||||
self.__threadLimit = BoundedSemaphore(1)
|
self.__threadLimit = BoundedSemaphore(1)
|
||||||
self.__invoker = invoker
|
self.__invoker = invoker
|
||||||
@ -39,6 +44,16 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
self.__threadLimit.acquire()
|
self.__threadLimit.acquire()
|
||||||
queue_item: Optional[InvocationQueueItem] = None
|
queue_item: Optional[InvocationQueueItem] = None
|
||||||
|
|
||||||
|
profiler = (
|
||||||
|
Profiler(
|
||||||
|
logger=self.__invoker.services.logger,
|
||||||
|
output_dir=self.__invoker.services.configuration.profiles_path,
|
||||||
|
prefix=self.__invoker.services.configuration.profile_prefix,
|
||||||
|
)
|
||||||
|
if self.__invoker.services.configuration.profile_graphs
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
|
||||||
while not stop_event.is_set():
|
while not stop_event.is_set():
|
||||||
try:
|
try:
|
||||||
queue_item = self.__invoker.services.queue.get()
|
queue_item = self.__invoker.services.queue.get()
|
||||||
@ -49,6 +64,10 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
# do not hammer the queue
|
# do not hammer the queue
|
||||||
time.sleep(0.5)
|
time.sleep(0.5)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
if profiler and profiler.profile_id != queue_item.graph_execution_state_id:
|
||||||
|
profiler.start(profile_id=queue_item.graph_execution_state_id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
graph_execution_state = self.__invoker.services.graph_execution_manager.get(
|
graph_execution_state = self.__invoker.services.graph_execution_manager.get(
|
||||||
queue_item.graph_execution_state_id
|
queue_item.graph_execution_state_id
|
||||||
@ -137,7 +156,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
except CanceledException:
|
except CanceledException:
|
||||||
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
with suppress(GESStatsNotFoundError):
|
||||||
|
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
||||||
pass
|
pass
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@ -162,7 +182,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
error_type=e.__class__.__name__,
|
error_type=e.__class__.__name__,
|
||||||
error=error,
|
error=error,
|
||||||
)
|
)
|
||||||
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
with suppress(GESStatsNotFoundError):
|
||||||
|
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# Check queue to see if this is canceled, and skip if so
|
# Check queue to see if this is canceled, and skip if so
|
||||||
@ -194,13 +215,21 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
error=traceback.format_exc(),
|
error=traceback.format_exc(),
|
||||||
)
|
)
|
||||||
elif is_complete:
|
elif is_complete:
|
||||||
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
|
with suppress(GESStatsNotFoundError):
|
||||||
self.__invoker.services.events.emit_graph_execution_complete(
|
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
|
||||||
queue_batch_id=queue_item.session_queue_batch_id,
|
self.__invoker.services.events.emit_graph_execution_complete(
|
||||||
queue_item_id=queue_item.session_queue_item_id,
|
queue_batch_id=queue_item.session_queue_batch_id,
|
||||||
queue_id=queue_item.session_queue_id,
|
queue_item_id=queue_item.session_queue_item_id,
|
||||||
graph_execution_state_id=graph_execution_state.id,
|
queue_id=queue_item.session_queue_id,
|
||||||
)
|
graph_execution_state_id=graph_execution_state.id,
|
||||||
|
)
|
||||||
|
if profiler:
|
||||||
|
profile_path = profiler.stop()
|
||||||
|
stats_path = profile_path.with_suffix(".json")
|
||||||
|
self.__invoker.services.performance_statistics.dump_stats(
|
||||||
|
graph_execution_state_id=graph_execution_state.id, output_path=stats_path
|
||||||
|
)
|
||||||
|
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor
|
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor
|
||||||
|
@ -30,8 +30,10 @@ writes to the system log is stored in InvocationServices.performance_statistics.
|
|||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from contextlib import AbstractContextManager
|
from contextlib import AbstractContextManager
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||||
|
from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary
|
||||||
|
|
||||||
|
|
||||||
class InvocationStatsServiceBase(ABC):
|
class InvocationStatsServiceBase(ABC):
|
||||||
@ -61,8 +63,9 @@ class InvocationStatsServiceBase(ABC):
|
|||||||
@abstractmethod
|
@abstractmethod
|
||||||
def reset_stats(self, graph_execution_state_id: str):
|
def reset_stats(self, graph_execution_state_id: str):
|
||||||
"""
|
"""
|
||||||
Reset all statistics for the indicated graph
|
Reset all statistics for the indicated graph.
|
||||||
:param graph_execution_state_id
|
:param graph_execution_state_id: The id of the session whose stats to reset.
|
||||||
|
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -70,5 +73,26 @@ class InvocationStatsServiceBase(ABC):
|
|||||||
def log_stats(self, graph_execution_state_id: str):
|
def log_stats(self, graph_execution_state_id: str):
|
||||||
"""
|
"""
|
||||||
Write out the accumulated statistics to the log or somewhere else.
|
Write out the accumulated statistics to the log or somewhere else.
|
||||||
|
:param graph_execution_state_id: The id of the session whose stats to log.
|
||||||
|
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
|
||||||
|
"""
|
||||||
|
Gets the accumulated statistics for the indicated graph.
|
||||||
|
:param graph_execution_state_id: The id of the session whose stats to get.
|
||||||
|
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
|
||||||
|
"""
|
||||||
|
Write out the accumulated statistics to the indicated path as JSON.
|
||||||
|
:param graph_execution_state_id: The id of the session whose stats to dump.
|
||||||
|
:param output_path: The file to write the stats to.
|
||||||
|
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
@ -1,5 +1,91 @@
|
|||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from dataclasses import dataclass
|
from dataclasses import asdict, dataclass
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
|
||||||
|
class GESStatsNotFoundError(Exception):
|
||||||
|
"""Raised when execution stats are not found for a given Graph Execution State."""
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class NodeExecutionStatsSummary:
|
||||||
|
"""The stats for a specific type of node."""
|
||||||
|
|
||||||
|
node_type: str
|
||||||
|
num_calls: int
|
||||||
|
time_used_seconds: float
|
||||||
|
peak_vram_gb: float
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ModelCacheStatsSummary:
|
||||||
|
"""The stats for the model cache."""
|
||||||
|
|
||||||
|
high_water_mark_gb: float
|
||||||
|
cache_size_gb: float
|
||||||
|
total_usage_gb: float
|
||||||
|
cache_hits: int
|
||||||
|
cache_misses: int
|
||||||
|
models_cached: int
|
||||||
|
models_cleared: int
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class GraphExecutionStatsSummary:
|
||||||
|
"""The stats for the graph execution state."""
|
||||||
|
|
||||||
|
graph_execution_state_id: str
|
||||||
|
execution_time_seconds: float
|
||||||
|
# `wall_time_seconds`, `ram_usage_gb` and `ram_change_gb` are derived from the node execution stats.
|
||||||
|
# In some situations, there are no node stats, so these values are optional.
|
||||||
|
wall_time_seconds: Optional[float]
|
||||||
|
ram_usage_gb: Optional[float]
|
||||||
|
ram_change_gb: Optional[float]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class InvocationStatsSummary:
|
||||||
|
"""
|
||||||
|
The accumulated stats for a graph execution.
|
||||||
|
Its `__str__` method returns a human-readable stats summary.
|
||||||
|
"""
|
||||||
|
|
||||||
|
vram_usage_gb: Optional[float]
|
||||||
|
graph_stats: GraphExecutionStatsSummary
|
||||||
|
model_cache_stats: ModelCacheStatsSummary
|
||||||
|
node_stats: list[NodeExecutionStatsSummary]
|
||||||
|
|
||||||
|
def __str__(self) -> str:
|
||||||
|
_str = ""
|
||||||
|
_str = f"Graph stats: {self.graph_stats.graph_execution_state_id}\n"
|
||||||
|
_str += f"{'Node':>30} {'Calls':>7} {'Seconds':>9} {'VRAM Used':>10}\n"
|
||||||
|
|
||||||
|
for summary in self.node_stats:
|
||||||
|
_str += f"{summary.node_type:>30} {summary.num_calls:>7} {summary.time_used_seconds:>8.3f}s {summary.peak_vram_gb:>9.3f}G\n"
|
||||||
|
|
||||||
|
_str += f"TOTAL GRAPH EXECUTION TIME: {self.graph_stats.execution_time_seconds:7.3f}s\n"
|
||||||
|
|
||||||
|
if self.graph_stats.wall_time_seconds is not None:
|
||||||
|
_str += f"TOTAL GRAPH WALL TIME: {self.graph_stats.wall_time_seconds:7.3f}s\n"
|
||||||
|
|
||||||
|
if self.graph_stats.ram_usage_gb is not None and self.graph_stats.ram_change_gb is not None:
|
||||||
|
_str += f"RAM used by InvokeAI process: {self.graph_stats.ram_usage_gb:4.2f}G ({self.graph_stats.ram_change_gb:+5.3f}G)\n"
|
||||||
|
|
||||||
|
_str += f"RAM used to load models: {self.model_cache_stats.total_usage_gb:4.2f}G\n"
|
||||||
|
if self.vram_usage_gb:
|
||||||
|
_str += f"VRAM in use: {self.vram_usage_gb:4.3f}G\n"
|
||||||
|
_str += "RAM cache statistics:\n"
|
||||||
|
_str += f" Model cache hits: {self.model_cache_stats.cache_hits}\n"
|
||||||
|
_str += f" Model cache misses: {self.model_cache_stats.cache_misses}\n"
|
||||||
|
_str += f" Models cached: {self.model_cache_stats.models_cached}\n"
|
||||||
|
_str += f" Models cleared from cache: {self.model_cache_stats.models_cleared}\n"
|
||||||
|
_str += f" Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}/{self.model_cache_stats.cache_size_gb:4.2f}G\n"
|
||||||
|
|
||||||
|
return _str
|
||||||
|
|
||||||
|
def as_dict(self) -> dict[str, Any]:
|
||||||
|
"""Returns the stats as a dictionary."""
|
||||||
|
return asdict(self)
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@ -55,12 +141,33 @@ class GraphExecutionStats:
|
|||||||
|
|
||||||
return last_node
|
return last_node
|
||||||
|
|
||||||
def get_pretty_log(self, graph_execution_state_id: str) -> str:
|
def get_graph_stats_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
|
||||||
log = f"Graph stats: {graph_execution_state_id}\n"
|
"""Get a summary of the graph stats."""
|
||||||
log += f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}\n"
|
first_node = self.get_first_node_stats()
|
||||||
|
last_node = self.get_last_node_stats()
|
||||||
|
|
||||||
# Log stats aggregated by node type.
|
wall_time_seconds: Optional[float] = None
|
||||||
|
ram_usage_gb: Optional[float] = None
|
||||||
|
ram_change_gb: Optional[float] = None
|
||||||
|
|
||||||
|
if last_node and first_node:
|
||||||
|
wall_time_seconds = last_node.end_time - first_node.start_time
|
||||||
|
ram_usage_gb = last_node.end_ram_gb
|
||||||
|
ram_change_gb = last_node.end_ram_gb - first_node.start_ram_gb
|
||||||
|
|
||||||
|
return GraphExecutionStatsSummary(
|
||||||
|
graph_execution_state_id=graph_execution_state_id,
|
||||||
|
execution_time_seconds=self.get_total_run_time(),
|
||||||
|
wall_time_seconds=wall_time_seconds,
|
||||||
|
ram_usage_gb=ram_usage_gb,
|
||||||
|
ram_change_gb=ram_change_gb,
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_node_stats_summaries(self) -> list[NodeExecutionStatsSummary]:
|
||||||
|
"""Get a summary of the node stats."""
|
||||||
|
summaries: list[NodeExecutionStatsSummary] = []
|
||||||
node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
|
node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
|
||||||
|
|
||||||
for node_stats in self._node_stats_list:
|
for node_stats in self._node_stats_list:
|
||||||
node_stats_by_type[node_stats.invocation_type].append(node_stats)
|
node_stats_by_type[node_stats.invocation_type].append(node_stats)
|
||||||
|
|
||||||
@ -68,17 +175,9 @@ class GraphExecutionStats:
|
|||||||
num_calls = len(node_type_stats_list)
|
num_calls = len(node_type_stats_list)
|
||||||
time_used = sum([n.total_time() for n in node_type_stats_list])
|
time_used = sum([n.total_time() for n in node_type_stats_list])
|
||||||
peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
|
peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
|
||||||
log += f"{node_type:>30} {num_calls:>4} {time_used:7.3f}s {peak_vram:4.3f}G\n"
|
summary = NodeExecutionStatsSummary(
|
||||||
|
node_type=node_type, num_calls=num_calls, time_used_seconds=time_used, peak_vram_gb=peak_vram
|
||||||
|
)
|
||||||
|
summaries.append(summary)
|
||||||
|
|
||||||
# Log stats for the entire graph.
|
return summaries
|
||||||
log += f"TOTAL GRAPH EXECUTION TIME: {self.get_total_run_time():7.3f}s\n"
|
|
||||||
|
|
||||||
first_node = self.get_first_node_stats()
|
|
||||||
last_node = self.get_last_node_stats()
|
|
||||||
if first_node is not None and last_node is not None:
|
|
||||||
total_wall_time = last_node.end_time - first_node.start_time
|
|
||||||
ram_change = last_node.end_ram_gb - first_node.start_ram_gb
|
|
||||||
log += f"TOTAL GRAPH WALL TIME: {total_wall_time:7.3f}s\n"
|
|
||||||
log += f"RAM used by InvokeAI process: {last_node.end_ram_gb:4.2f}G ({ram_change:+5.3f}G)\n"
|
|
||||||
|
|
||||||
return log
|
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
|
import json
|
||||||
import time
|
import time
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
import psutil
|
import psutil
|
||||||
import torch
|
import torch
|
||||||
@ -10,7 +12,15 @@ from invokeai.app.services.invoker import Invoker
|
|||||||
from invokeai.backend.model_management.model_cache import CacheStats
|
from invokeai.backend.model_management.model_cache import CacheStats
|
||||||
|
|
||||||
from .invocation_stats_base import InvocationStatsServiceBase
|
from .invocation_stats_base import InvocationStatsServiceBase
|
||||||
from .invocation_stats_common import GraphExecutionStats, NodeExecutionStats
|
from .invocation_stats_common import (
|
||||||
|
GESStatsNotFoundError,
|
||||||
|
GraphExecutionStats,
|
||||||
|
GraphExecutionStatsSummary,
|
||||||
|
InvocationStatsSummary,
|
||||||
|
ModelCacheStatsSummary,
|
||||||
|
NodeExecutionStats,
|
||||||
|
NodeExecutionStatsSummary,
|
||||||
|
)
|
||||||
|
|
||||||
# Size of 1GB in bytes.
|
# Size of 1GB in bytes.
|
||||||
GB = 2**30
|
GB = 2**30
|
||||||
@ -95,31 +105,66 @@ class InvocationStatsService(InvocationStatsServiceBase):
|
|||||||
del self._stats[graph_execution_state_id]
|
del self._stats[graph_execution_state_id]
|
||||||
del self._cache_stats[graph_execution_state_id]
|
del self._cache_stats[graph_execution_state_id]
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}.")
|
msg = f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
|
||||||
|
logger.error(msg)
|
||||||
|
raise GESStatsNotFoundError(msg) from e
|
||||||
|
|
||||||
def log_stats(self, graph_execution_state_id: str):
|
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
|
||||||
|
graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
|
||||||
|
node_stats_summaries = self._get_node_summaries(graph_execution_state_id)
|
||||||
|
model_cache_stats_summary = self._get_model_cache_summary(graph_execution_state_id)
|
||||||
|
vram_usage_gb = torch.cuda.memory_allocated() / GB if torch.cuda.is_available() else None
|
||||||
|
|
||||||
|
return InvocationStatsSummary(
|
||||||
|
graph_stats=graph_stats_summary,
|
||||||
|
model_cache_stats=model_cache_stats_summary,
|
||||||
|
node_stats=node_stats_summaries,
|
||||||
|
vram_usage_gb=vram_usage_gb,
|
||||||
|
)
|
||||||
|
|
||||||
|
def log_stats(self, graph_execution_state_id: str) -> None:
|
||||||
|
stats = self.get_stats(graph_execution_state_id)
|
||||||
|
logger.info(str(stats))
|
||||||
|
|
||||||
|
def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
|
||||||
|
stats = self.get_stats(graph_execution_state_id)
|
||||||
|
with open(output_path, "w") as f:
|
||||||
|
f.write(json.dumps(stats.as_dict(), indent=2))
|
||||||
|
|
||||||
|
def _get_model_cache_summary(self, graph_execution_state_id: str) -> ModelCacheStatsSummary:
|
||||||
try:
|
try:
|
||||||
graph_stats = self._stats[graph_execution_state_id]
|
|
||||||
cache_stats = self._cache_stats[graph_execution_state_id]
|
cache_stats = self._cache_stats[graph_execution_state_id]
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
logger.warning(f"Attempted to log statistics for unknown graph {graph_execution_state_id}: {e}.")
|
msg = f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
|
||||||
return
|
logger.error(msg)
|
||||||
|
raise GESStatsNotFoundError(msg) from e
|
||||||
|
|
||||||
log = graph_stats.get_pretty_log(graph_execution_state_id)
|
return ModelCacheStatsSummary(
|
||||||
|
cache_hits=cache_stats.hits,
|
||||||
|
cache_misses=cache_stats.misses,
|
||||||
|
high_water_mark_gb=cache_stats.high_watermark / GB,
|
||||||
|
cache_size_gb=cache_stats.cache_size / GB,
|
||||||
|
total_usage_gb=sum(list(cache_stats.loaded_model_sizes.values())) / GB,
|
||||||
|
models_cached=cache_stats.in_cache,
|
||||||
|
models_cleared=cache_stats.cleared,
|
||||||
|
)
|
||||||
|
|
||||||
hwm = cache_stats.high_watermark / GB
|
def _get_graph_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
|
||||||
tot = cache_stats.cache_size / GB
|
try:
|
||||||
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GB
|
graph_stats = self._stats[graph_execution_state_id]
|
||||||
log += f"RAM used to load models: {loaded:4.2f}G\n"
|
except KeyError as e:
|
||||||
if torch.cuda.is_available():
|
msg = f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
|
||||||
log += f"VRAM in use: {(torch.cuda.memory_allocated() / GB):4.3f}G\n"
|
logger.error(msg)
|
||||||
log += "RAM cache statistics:\n"
|
raise GESStatsNotFoundError(msg) from e
|
||||||
log += f" Model cache hits: {cache_stats.hits}\n"
|
|
||||||
log += f" Model cache misses: {cache_stats.misses}\n"
|
|
||||||
log += f" Models cached: {cache_stats.in_cache}\n"
|
|
||||||
log += f" Models cleared from cache: {cache_stats.cleared}\n"
|
|
||||||
log += f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G\n"
|
|
||||||
logger.info(log)
|
|
||||||
|
|
||||||
del self._stats[graph_execution_state_id]
|
return graph_stats.get_graph_stats_summary(graph_execution_state_id)
|
||||||
del self._cache_stats[graph_execution_state_id]
|
|
||||||
|
def _get_node_summaries(self, graph_execution_state_id: str) -> list[NodeExecutionStatsSummary]:
|
||||||
|
try:
|
||||||
|
graph_stats = self._stats[graph_execution_state_id]
|
||||||
|
except KeyError as e:
|
||||||
|
msg = f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
|
||||||
|
logger.error(msg)
|
||||||
|
raise GESStatsNotFoundError(msg) from e
|
||||||
|
|
||||||
|
return graph_stats.get_node_stats_summaries()
|
||||||
|
@ -1,10 +1,8 @@
|
|||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from typing import Callable, Generic, Optional, TypeVar
|
from typing import Callable, Generic, TypeVar
|
||||||
|
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from invokeai.app.services.shared.pagination import PaginatedResults
|
|
||||||
|
|
||||||
T = TypeVar("T", bound=BaseModel)
|
T = TypeVar("T", bound=BaseModel)
|
||||||
|
|
||||||
|
|
||||||
@ -25,23 +23,14 @@ class ItemStorageABC(ABC, Generic[T]):
|
|||||||
"""Gets the item, parsing it into a Pydantic model"""
|
"""Gets the item, parsing it into a Pydantic model"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_raw(self, item_id: str) -> Optional[str]:
|
|
||||||
"""Gets the raw item as a string, skipping Pydantic parsing"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def set(self, item: T) -> None:
|
def set(self, item: T) -> None:
|
||||||
"""Sets the item"""
|
"""Sets the item"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
|
def delete(self, item_id: str) -> None:
|
||||||
"""Gets a paginated list of items"""
|
"""Deletes the item"""
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def on_changed(self, on_changed: Callable[[T], None]) -> None:
|
def on_changed(self, on_changed: Callable[[T], None]) -> None:
|
||||||
|
50
invokeai/app/services/item_storage/item_storage_memory.py
Normal file
50
invokeai/app/services/item_storage/item_storage_memory.py
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
from collections import OrderedDict
|
||||||
|
from contextlib import suppress
|
||||||
|
from typing import Generic, Optional, TypeVar
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC
|
||||||
|
|
||||||
|
T = TypeVar("T", bound=BaseModel)
|
||||||
|
|
||||||
|
|
||||||
|
class ItemStorageMemory(ItemStorageABC, Generic[T]):
|
||||||
|
"""
|
||||||
|
Provides a simple in-memory storage for items, with a maximum number of items to store.
|
||||||
|
The storage uses the LRU strategy to evict items from storage when the max has been reached.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, id_field: str = "id", max_items: int = 10) -> None:
|
||||||
|
super().__init__()
|
||||||
|
if max_items < 1:
|
||||||
|
raise ValueError("max_items must be at least 1")
|
||||||
|
if not id_field:
|
||||||
|
raise ValueError("id_field must not be empty")
|
||||||
|
self._id_field = id_field
|
||||||
|
self._items: OrderedDict[str, T] = OrderedDict()
|
||||||
|
self._max_items = max_items
|
||||||
|
|
||||||
|
def get(self, item_id: str) -> Optional[T]:
|
||||||
|
# If the item exists, move it to the end of the OrderedDict.
|
||||||
|
item = self._items.pop(item_id, None)
|
||||||
|
if item is not None:
|
||||||
|
self._items[item_id] = item
|
||||||
|
return self._items.get(item_id)
|
||||||
|
|
||||||
|
def set(self, item: T) -> None:
|
||||||
|
item_id = getattr(item, self._id_field)
|
||||||
|
if item_id in self._items:
|
||||||
|
# If item already exists, remove it and add it to the end
|
||||||
|
self._items.pop(item_id)
|
||||||
|
elif len(self._items) >= self._max_items:
|
||||||
|
# If cache is full, evict the least recently used item
|
||||||
|
self._items.popitem(last=False)
|
||||||
|
self._items[item_id] = item
|
||||||
|
self._on_changed(item)
|
||||||
|
|
||||||
|
def delete(self, item_id: str) -> None:
|
||||||
|
# This is a no-op if the item doesn't exist.
|
||||||
|
with suppress(KeyError):
|
||||||
|
del self._items[item_id]
|
||||||
|
self._on_deleted(item_id)
|
@ -1,147 +0,0 @@
|
|||||||
import sqlite3
|
|
||||||
import threading
|
|
||||||
from typing import Generic, Optional, TypeVar, get_args
|
|
||||||
|
|
||||||
from pydantic import BaseModel, TypeAdapter
|
|
||||||
|
|
||||||
from invokeai.app.services.shared.pagination import PaginatedResults
|
|
||||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
|
||||||
|
|
||||||
from .item_storage_base import ItemStorageABC
|
|
||||||
|
|
||||||
T = TypeVar("T", bound=BaseModel)
|
|
||||||
|
|
||||||
|
|
||||||
class SqliteItemStorage(ItemStorageABC, Generic[T]):
|
|
||||||
_table_name: str
|
|
||||||
_conn: sqlite3.Connection
|
|
||||||
_cursor: sqlite3.Cursor
|
|
||||||
_id_field: str
|
|
||||||
_lock: threading.RLock
|
|
||||||
_validator: Optional[TypeAdapter[T]]
|
|
||||||
|
|
||||||
def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
self._lock = db.lock
|
|
||||||
self._conn = db.conn
|
|
||||||
self._table_name = table_name
|
|
||||||
self._id_field = id_field # TODO: validate that T has this field
|
|
||||||
self._cursor = self._conn.cursor()
|
|
||||||
self._validator: Optional[TypeAdapter[T]] = None
|
|
||||||
|
|
||||||
self._create_table()
|
|
||||||
|
|
||||||
def _create_table(self):
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
|
|
||||||
item TEXT,
|
|
||||||
id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
|
|
||||||
)
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
|
|
||||||
)
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
def _parse_item(self, item: str) -> T:
|
|
||||||
if self._validator is None:
|
|
||||||
"""
|
|
||||||
We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so
|
|
||||||
we can create it when it is first needed instead.
|
|
||||||
__orig_class__ is technically an implementation detail of the typing module, not a supported API
|
|
||||||
"""
|
|
||||||
self._validator = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined]
|
|
||||||
return self._validator.validate_json(item)
|
|
||||||
|
|
||||||
def set(self, item: T):
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
|
|
||||||
(item.model_dump_json(warnings=False, exclude_none=True),),
|
|
||||||
)
|
|
||||||
self._conn.commit()
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
self._on_changed(item)
|
|
||||||
|
|
||||||
def get(self, id: str) -> Optional[T]:
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
|
|
||||||
result = self._cursor.fetchone()
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
if not result:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return self._parse_item(result[0])
|
|
||||||
|
|
||||||
def get_raw(self, id: str) -> Optional[str]:
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
|
|
||||||
result = self._cursor.fetchone()
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
if not result:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return result[0]
|
|
||||||
|
|
||||||
def delete(self, id: str):
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),))
|
|
||||||
self._conn.commit()
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
self._on_deleted(id)
|
|
||||||
|
|
||||||
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
|
|
||||||
(per_page, page * per_page),
|
|
||||||
)
|
|
||||||
result = self._cursor.fetchall()
|
|
||||||
|
|
||||||
items = [self._parse_item(r[0]) for r in result]
|
|
||||||
|
|
||||||
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
|
|
||||||
count = self._cursor.fetchone()[0]
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
pageCount = int(count / per_page) + 1
|
|
||||||
|
|
||||||
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
|
|
||||||
|
|
||||||
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
|
|
||||||
try:
|
|
||||||
self._lock.acquire()
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
|
|
||||||
(f"%{query}%", per_page, page * per_page),
|
|
||||||
)
|
|
||||||
result = self._cursor.fetchall()
|
|
||||||
|
|
||||||
items = [self._parse_item(r[0]) for r in result]
|
|
||||||
|
|
||||||
self._cursor.execute(
|
|
||||||
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
|
|
||||||
(f"%{query}%",),
|
|
||||||
)
|
|
||||||
count = self._cursor.fetchone()[0]
|
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
pageCount = int(count / per_page) + 1
|
|
||||||
|
|
||||||
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
|
|
@ -165,8 +165,8 @@ class ModelInstallJob(BaseModel):
|
|||||||
)
|
)
|
||||||
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
||||||
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
||||||
bytes: Optional[int] = Field(
|
bytes: int = Field(
|
||||||
default=None, description="For a remote model, the number of bytes downloaded so far (may not be available)"
|
default=0, description="For a remote model, the number of bytes downloaded so far (may not be available)"
|
||||||
)
|
)
|
||||||
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
|
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
|
||||||
source_metadata: Optional[AnyModelRepoMetadata] = Field(
|
source_metadata: Optional[AnyModelRepoMetadata] = Field(
|
||||||
|
@ -535,19 +535,19 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||||
# URLs from Civitai or HuggingFace will be handled specially
|
# URLs from Civitai or HuggingFace will be handled specially
|
||||||
url_patterns = {
|
url_patterns = {
|
||||||
r"https?://civitai.com/": CivitaiMetadataFetch,
|
r"^https?://civitai.com/": CivitaiMetadataFetch,
|
||||||
r"https?://huggingface.co/": HuggingFaceMetadataFetch,
|
r"^https?://huggingface.co/[^/]+/[^/]+$": HuggingFaceMetadataFetch,
|
||||||
}
|
}
|
||||||
metadata = None
|
metadata = None
|
||||||
for pattern, fetcher in url_patterns.items():
|
for pattern, fetcher in url_patterns.items():
|
||||||
if re.match(pattern, str(source.url), re.IGNORECASE):
|
if re.match(pattern, str(source.url), re.IGNORECASE):
|
||||||
metadata = fetcher(self._session).from_url(source.url)
|
metadata = fetcher(self._session).from_url(source.url)
|
||||||
break
|
break
|
||||||
|
self._logger.debug(f"metadata={metadata}")
|
||||||
if metadata and isinstance(metadata, ModelMetadataWithFiles):
|
if metadata and isinstance(metadata, ModelMetadataWithFiles):
|
||||||
remote_files = metadata.download_urls(session=self._session)
|
remote_files = metadata.download_urls(session=self._session)
|
||||||
else:
|
else:
|
||||||
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]
|
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]
|
||||||
|
|
||||||
return self._import_remote_model(
|
return self._import_remote_model(
|
||||||
source=source,
|
source=source,
|
||||||
config=config,
|
config=config,
|
||||||
@ -586,6 +586,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
|
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
|
||||||
|
|
||||||
self._logger.info(f"Queuing {source} for downloading")
|
self._logger.info(f"Queuing {source} for downloading")
|
||||||
|
self._logger.debug(f"remote_files={remote_files}")
|
||||||
for model_file in remote_files:
|
for model_file in remote_files:
|
||||||
url = model_file.url
|
url = model_file.url
|
||||||
path = model_file.path
|
path = model_file.path
|
||||||
|
@ -7,6 +7,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import
|
|||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
|
||||||
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_5 import build_migration_5
|
||||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||||
|
|
||||||
|
|
||||||
@ -31,6 +32,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
|||||||
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
|
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
|
||||||
migrator.register_migration(build_migration_3(app_config=config, logger=logger))
|
migrator.register_migration(build_migration_3(app_config=config, logger=logger))
|
||||||
migrator.register_migration(build_migration_4())
|
migrator.register_migration(build_migration_4())
|
||||||
|
migrator.register_migration(build_migration_5())
|
||||||
migrator.run_migrations()
|
migrator.run_migrations()
|
||||||
|
|
||||||
return db
|
return db
|
||||||
|
@ -0,0 +1,34 @@
|
|||||||
|
import sqlite3
|
||||||
|
|
||||||
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||||
|
|
||||||
|
|
||||||
|
class Migration5Callback:
|
||||||
|
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
self._drop_graph_executions(cursor)
|
||||||
|
|
||||||
|
def _drop_graph_executions(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
"""Drops the `graph_executions` table."""
|
||||||
|
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
DROP TABLE IF EXISTS graph_executions;
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def build_migration_5() -> Migration:
|
||||||
|
"""
|
||||||
|
Build the migration from database version 4 to 5.
|
||||||
|
|
||||||
|
Introduced in v3.6.3, this migration:
|
||||||
|
- Drops the `graph_executions` table. We are able to do this because we are moving the graph storage
|
||||||
|
to be purely in-memory.
|
||||||
|
"""
|
||||||
|
migration_5 = Migration(
|
||||||
|
from_version=4,
|
||||||
|
to_version=5,
|
||||||
|
callback=Migration5Callback(),
|
||||||
|
)
|
||||||
|
|
||||||
|
return migration_5
|
@ -72,7 +72,12 @@ class MigrateModelYamlToDb1:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
base_type, model_type, model_name = str(model_key).split("/")
|
base_type, model_type, model_name = str(model_key).split("/")
|
||||||
hash = FastModelHash.hash(self.config.models_path / stanza.path)
|
try:
|
||||||
|
hash = FastModelHash.hash(self.config.models_path / stanza.path)
|
||||||
|
except OSError:
|
||||||
|
self.logger.warning(f"The model at {stanza.path} is not a valid file or directory. Skipping migration.")
|
||||||
|
continue
|
||||||
|
|
||||||
assert isinstance(model_key, str)
|
assert isinstance(model_key, str)
|
||||||
new_key = sha1(model_key.encode("utf-8")).hexdigest()
|
new_key = sha1(model_key.encode("utf-8")).hexdigest()
|
||||||
|
|
||||||
|
@ -31,6 +31,7 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
|
|||||||
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
|
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
|
||||||
User = "user"
|
User = "user"
|
||||||
Default = "default"
|
Default = "default"
|
||||||
|
Project = "project"
|
||||||
|
|
||||||
|
|
||||||
class WorkflowMeta(BaseModel):
|
class WorkflowMeta(BaseModel):
|
||||||
|
67
invokeai/app/util/profiler.py
Normal file
67
invokeai/app/util/profiler.py
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
import cProfile
|
||||||
|
from logging import Logger
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
class Profiler:
|
||||||
|
"""
|
||||||
|
Simple wrapper around cProfile.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
```
|
||||||
|
# Create a profiler
|
||||||
|
profiler = Profiler(logger, output_dir, "sql_query_perf")
|
||||||
|
# Start a new profile
|
||||||
|
profiler.start("my_profile")
|
||||||
|
# Do stuff
|
||||||
|
profiler.stop()
|
||||||
|
```
|
||||||
|
|
||||||
|
Visualize a profile as a flamegraph with [snakeviz](https://jiffyclub.github.io/snakeviz/)
|
||||||
|
```sh
|
||||||
|
snakeviz my_profile.prof
|
||||||
|
```
|
||||||
|
|
||||||
|
Visualize a profile as directed graph with [graphviz](https://graphviz.org/download/) & [gprof2dot](https://github.com/jrfonseca/gprof2dot)
|
||||||
|
```sh
|
||||||
|
gprof2dot -f pstats my_profile.prof | dot -Tpng -o my_profile.png
|
||||||
|
# SVG or PDF may be nicer - you can search for function names
|
||||||
|
gprof2dot -f pstats my_profile.prof | dot -Tsvg -o my_profile.svg
|
||||||
|
gprof2dot -f pstats my_profile.prof | dot -Tpdf -o my_profile.pdf
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, logger: Logger, output_dir: Path, prefix: Optional[str] = None) -> None:
|
||||||
|
self._logger = logger.getChild(f"profiler.{prefix}" if prefix else "profiler")
|
||||||
|
self._output_dir = output_dir
|
||||||
|
self._output_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
self._profiler: Optional[cProfile.Profile] = None
|
||||||
|
self._prefix = prefix
|
||||||
|
|
||||||
|
self.profile_id: Optional[str] = None
|
||||||
|
|
||||||
|
def start(self, profile_id: str) -> None:
|
||||||
|
if self._profiler:
|
||||||
|
self.stop()
|
||||||
|
|
||||||
|
self.profile_id = profile_id
|
||||||
|
|
||||||
|
self._profiler = cProfile.Profile()
|
||||||
|
self._profiler.enable()
|
||||||
|
self._logger.info(f"Started profiling {self.profile_id}.")
|
||||||
|
|
||||||
|
def stop(self) -> Path:
|
||||||
|
if not self._profiler:
|
||||||
|
raise RuntimeError("Profiler not initialized. Call start() first.")
|
||||||
|
self._profiler.disable()
|
||||||
|
|
||||||
|
filename = f"{self._prefix}_{self.profile_id}.prof" if self._prefix else f"{self.profile_id}.prof"
|
||||||
|
path = Path(self._output_dir, filename)
|
||||||
|
|
||||||
|
self._profiler.dump_stats(path)
|
||||||
|
self._logger.info(f"Stopped profiling, profile dumped to {path}.")
|
||||||
|
self._profiler = None
|
||||||
|
self.profile_id = None
|
||||||
|
|
||||||
|
return path
|
109
invokeai/backend/image_util/depth_anything/__init__.py
Normal file
109
invokeai/backend/image_util/depth_anything/__init__.py
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
import pathlib
|
||||||
|
from typing import Literal, Union
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import torch.nn.functional as F
|
||||||
|
from einops import repeat
|
||||||
|
from PIL import Image
|
||||||
|
from torchvision.transforms import Compose
|
||||||
|
|
||||||
|
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||||
|
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
|
||||||
|
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
|
||||||
|
from invokeai.backend.util.devices import choose_torch_device
|
||||||
|
from invokeai.backend.util.util import download_with_progress_bar
|
||||||
|
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
|
||||||
|
DEPTH_ANYTHING_MODELS = {
|
||||||
|
"large": {
|
||||||
|
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
|
||||||
|
"local": "any/annotators/depth_anything/depth_anything_vitl14.pth",
|
||||||
|
},
|
||||||
|
"base": {
|
||||||
|
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
|
||||||
|
"local": "any/annotators/depth_anything/depth_anything_vitb14.pth",
|
||||||
|
},
|
||||||
|
"small": {
|
||||||
|
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
|
||||||
|
"local": "any/annotators/depth_anything/depth_anything_vits14.pth",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
transform = Compose(
|
||||||
|
[
|
||||||
|
Resize(
|
||||||
|
width=518,
|
||||||
|
height=518,
|
||||||
|
resize_target=False,
|
||||||
|
keep_aspect_ratio=True,
|
||||||
|
ensure_multiple_of=14,
|
||||||
|
resize_method="lower_bound",
|
||||||
|
image_interpolation_method=cv2.INTER_CUBIC,
|
||||||
|
),
|
||||||
|
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
||||||
|
PrepareForNet(),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class DepthAnythingDetector:
|
||||||
|
def __init__(self) -> None:
|
||||||
|
self.model = None
|
||||||
|
self.model_size: Union[Literal["large", "base", "small"], None] = None
|
||||||
|
|
||||||
|
def load_model(self, model_size=Literal["large", "base", "small"]):
|
||||||
|
DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
|
||||||
|
if not DEPTH_ANYTHING_MODEL_PATH.exists():
|
||||||
|
download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)
|
||||||
|
|
||||||
|
if not self.model or model_size != self.model_size:
|
||||||
|
del self.model
|
||||||
|
self.model_size = model_size
|
||||||
|
|
||||||
|
match self.model_size:
|
||||||
|
case "small":
|
||||||
|
self.model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
|
||||||
|
case "base":
|
||||||
|
self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
|
||||||
|
case "large":
|
||||||
|
self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
|
||||||
|
case _:
|
||||||
|
raise TypeError("Not a supported model")
|
||||||
|
|
||||||
|
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
|
||||||
|
self.model.eval()
|
||||||
|
|
||||||
|
self.model.to(choose_torch_device())
|
||||||
|
return self.model
|
||||||
|
|
||||||
|
def to(self, device):
|
||||||
|
self.model.to(device)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __call__(self, image, resolution=512, offload=False):
|
||||||
|
image = np.array(image, dtype=np.uint8)
|
||||||
|
image = image[:, :, ::-1] / 255.0
|
||||||
|
|
||||||
|
image_height, image_width = image.shape[:2]
|
||||||
|
image = transform({"image": image})["image"]
|
||||||
|
image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())
|
||||||
|
|
||||||
|
with torch.no_grad():
|
||||||
|
depth = self.model(image)
|
||||||
|
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
|
||||||
|
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
|
||||||
|
|
||||||
|
depth_map = repeat(depth, "h w -> h w 3").cpu().numpy().astype(np.uint8)
|
||||||
|
depth_map = Image.fromarray(depth_map)
|
||||||
|
|
||||||
|
new_height = int(image_height * (resolution / image_width))
|
||||||
|
depth_map = depth_map.resize((resolution, new_height))
|
||||||
|
|
||||||
|
if offload:
|
||||||
|
del self.model
|
||||||
|
|
||||||
|
return depth_map
|
145
invokeai/backend/image_util/depth_anything/model/blocks.py
Normal file
145
invokeai/backend/image_util/depth_anything/model/blocks.py
Normal file
@ -0,0 +1,145 @@
|
|||||||
|
import torch.nn as nn
|
||||||
|
|
||||||
|
|
||||||
|
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
|
||||||
|
scratch = nn.Module()
|
||||||
|
|
||||||
|
out_shape1 = out_shape
|
||||||
|
out_shape2 = out_shape
|
||||||
|
out_shape3 = out_shape
|
||||||
|
if len(in_shape) >= 4:
|
||||||
|
out_shape4 = out_shape
|
||||||
|
|
||||||
|
if expand:
|
||||||
|
out_shape1 = out_shape
|
||||||
|
out_shape2 = out_shape * 2
|
||||||
|
out_shape3 = out_shape * 4
|
||||||
|
if len(in_shape) >= 4:
|
||||||
|
out_shape4 = out_shape * 8
|
||||||
|
|
||||||
|
scratch.layer1_rn = nn.Conv2d(
|
||||||
|
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||||
|
)
|
||||||
|
scratch.layer2_rn = nn.Conv2d(
|
||||||
|
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||||
|
)
|
||||||
|
scratch.layer3_rn = nn.Conv2d(
|
||||||
|
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||||
|
)
|
||||||
|
if len(in_shape) >= 4:
|
||||||
|
scratch.layer4_rn = nn.Conv2d(
|
||||||
|
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||||
|
)
|
||||||
|
|
||||||
|
return scratch
|
||||||
|
|
||||||
|
|
||||||
|
class ResidualConvUnit(nn.Module):
|
||||||
|
"""Residual convolution module."""
|
||||||
|
|
||||||
|
def __init__(self, features, activation, bn):
|
||||||
|
"""Init.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
features (int): number of features
|
||||||
|
"""
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
self.bn = bn
|
||||||
|
|
||||||
|
self.groups = 1
|
||||||
|
|
||||||
|
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||||
|
|
||||||
|
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||||
|
|
||||||
|
if self.bn:
|
||||||
|
self.bn1 = nn.BatchNorm2d(features)
|
||||||
|
self.bn2 = nn.BatchNorm2d(features)
|
||||||
|
|
||||||
|
self.activation = activation
|
||||||
|
|
||||||
|
self.skip_add = nn.quantized.FloatFunctional()
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
"""Forward pass.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
x (tensor): input
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tensor: output
|
||||||
|
"""
|
||||||
|
|
||||||
|
out = self.activation(x)
|
||||||
|
out = self.conv1(out)
|
||||||
|
if self.bn:
|
||||||
|
out = self.bn1(out)
|
||||||
|
|
||||||
|
out = self.activation(out)
|
||||||
|
out = self.conv2(out)
|
||||||
|
if self.bn:
|
||||||
|
out = self.bn2(out)
|
||||||
|
|
||||||
|
if self.groups > 1:
|
||||||
|
out = self.conv_merge(out)
|
||||||
|
|
||||||
|
return self.skip_add.add(out, x)
|
||||||
|
|
||||||
|
|
||||||
|
class FeatureFusionBlock(nn.Module):
|
||||||
|
"""Feature fusion block."""
|
||||||
|
|
||||||
|
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
|
||||||
|
"""Init.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
features (int): number of features
|
||||||
|
"""
|
||||||
|
super(FeatureFusionBlock, self).__init__()
|
||||||
|
|
||||||
|
self.deconv = deconv
|
||||||
|
self.align_corners = align_corners
|
||||||
|
|
||||||
|
self.groups = 1
|
||||||
|
|
||||||
|
self.expand = expand
|
||||||
|
out_features = features
|
||||||
|
if self.expand:
|
||||||
|
out_features = features // 2
|
||||||
|
|
||||||
|
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
|
||||||
|
|
||||||
|
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
|
||||||
|
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
|
||||||
|
|
||||||
|
self.skip_add = nn.quantized.FloatFunctional()
|
||||||
|
|
||||||
|
self.size = size
|
||||||
|
|
||||||
|
def forward(self, *xs, size=None):
|
||||||
|
"""Forward pass.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tensor: output
|
||||||
|
"""
|
||||||
|
output = xs[0]
|
||||||
|
|
||||||
|
if len(xs) == 2:
|
||||||
|
res = self.resConfUnit1(xs[1])
|
||||||
|
output = self.skip_add.add(output, res)
|
||||||
|
|
||||||
|
output = self.resConfUnit2(output)
|
||||||
|
|
||||||
|
if (size is None) and (self.size is None):
|
||||||
|
modifier = {"scale_factor": 2}
|
||||||
|
elif size is None:
|
||||||
|
modifier = {"size": self.size}
|
||||||
|
else:
|
||||||
|
modifier = {"size": size}
|
||||||
|
|
||||||
|
output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)
|
||||||
|
|
||||||
|
output = self.out_conv(output)
|
||||||
|
|
||||||
|
return output
|
183
invokeai/backend/image_util/depth_anything/model/dpt.py
Normal file
183
invokeai/backend/image_util/depth_anything/model/dpt.py
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
import torch.nn.functional as F
|
||||||
|
|
||||||
|
from .blocks import FeatureFusionBlock, _make_scratch
|
||||||
|
|
||||||
|
torchhub_path = Path(__file__).parent.parent / "torchhub"
|
||||||
|
|
||||||
|
|
||||||
|
def _make_fusion_block(features, use_bn, size=None):
|
||||||
|
return FeatureFusionBlock(
|
||||||
|
features,
|
||||||
|
nn.ReLU(False),
|
||||||
|
deconv=False,
|
||||||
|
bn=use_bn,
|
||||||
|
expand=False,
|
||||||
|
align_corners=True,
|
||||||
|
size=size,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class DPTHead(nn.Module):
|
||||||
|
def __init__(self, nclass, in_channels, features, out_channels, use_bn=False, use_clstoken=False):
|
||||||
|
super(DPTHead, self).__init__()
|
||||||
|
|
||||||
|
self.nclass = nclass
|
||||||
|
self.use_clstoken = use_clstoken
|
||||||
|
|
||||||
|
self.projects = nn.ModuleList(
|
||||||
|
[
|
||||||
|
nn.Conv2d(
|
||||||
|
in_channels=in_channels,
|
||||||
|
out_channels=out_channel,
|
||||||
|
kernel_size=1,
|
||||||
|
stride=1,
|
||||||
|
padding=0,
|
||||||
|
)
|
||||||
|
for out_channel in out_channels
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
self.resize_layers = nn.ModuleList(
|
||||||
|
[
|
||||||
|
nn.ConvTranspose2d(
|
||||||
|
in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0
|
||||||
|
),
|
||||||
|
nn.ConvTranspose2d(
|
||||||
|
in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0
|
||||||
|
),
|
||||||
|
nn.Identity(),
|
||||||
|
nn.Conv2d(
|
||||||
|
in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1
|
||||||
|
),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
if use_clstoken:
|
||||||
|
self.readout_projects = nn.ModuleList()
|
||||||
|
for _ in range(len(self.projects)):
|
||||||
|
self.readout_projects.append(nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()))
|
||||||
|
|
||||||
|
self.scratch = _make_scratch(
|
||||||
|
out_channels,
|
||||||
|
features,
|
||||||
|
groups=1,
|
||||||
|
expand=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.scratch.stem_transpose = None
|
||||||
|
|
||||||
|
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
|
||||||
|
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
|
||||||
|
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
|
||||||
|
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
|
||||||
|
|
||||||
|
head_features_1 = features
|
||||||
|
head_features_2 = 32
|
||||||
|
|
||||||
|
if nclass > 1:
|
||||||
|
self.scratch.output_conv = nn.Sequential(
|
||||||
|
nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
|
||||||
|
nn.ReLU(True),
|
||||||
|
nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.scratch.output_conv1 = nn.Conv2d(
|
||||||
|
head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1
|
||||||
|
)
|
||||||
|
|
||||||
|
self.scratch.output_conv2 = nn.Sequential(
|
||||||
|
nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
|
||||||
|
nn.ReLU(True),
|
||||||
|
nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
|
||||||
|
nn.ReLU(True),
|
||||||
|
nn.Identity(),
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, out_features, patch_h, patch_w):
|
||||||
|
out = []
|
||||||
|
for i, x in enumerate(out_features):
|
||||||
|
if self.use_clstoken:
|
||||||
|
x, cls_token = x[0], x[1]
|
||||||
|
readout = cls_token.unsqueeze(1).expand_as(x)
|
||||||
|
x = self.readout_projects[i](torch.cat((x, readout), -1))
|
||||||
|
else:
|
||||||
|
x = x[0]
|
||||||
|
|
||||||
|
x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
|
||||||
|
|
||||||
|
x = self.projects[i](x)
|
||||||
|
x = self.resize_layers[i](x)
|
||||||
|
|
||||||
|
out.append(x)
|
||||||
|
|
||||||
|
layer_1, layer_2, layer_3, layer_4 = out
|
||||||
|
|
||||||
|
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
||||||
|
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
||||||
|
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
||||||
|
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
||||||
|
|
||||||
|
path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
|
||||||
|
path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
|
||||||
|
path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
|
||||||
|
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
||||||
|
|
||||||
|
out = self.scratch.output_conv1(path_1)
|
||||||
|
out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
|
||||||
|
out = self.scratch.output_conv2(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class DPT_DINOv2(nn.Module):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
features,
|
||||||
|
out_channels,
|
||||||
|
encoder="vitl",
|
||||||
|
use_bn=False,
|
||||||
|
use_clstoken=False,
|
||||||
|
):
|
||||||
|
super(DPT_DINOv2, self).__init__()
|
||||||
|
|
||||||
|
assert encoder in ["vits", "vitb", "vitl"]
|
||||||
|
|
||||||
|
# # in case the Internet connection is not stable, please load the DINOv2 locally
|
||||||
|
# if use_local:
|
||||||
|
# self.pretrained = torch.hub.load(
|
||||||
|
# torchhub_path / "facebookresearch_dinov2_main",
|
||||||
|
# "dinov2_{:}14".format(encoder),
|
||||||
|
# source="local",
|
||||||
|
# pretrained=False,
|
||||||
|
# )
|
||||||
|
# else:
|
||||||
|
# self.pretrained = torch.hub.load(
|
||||||
|
# "facebookresearch/dinov2",
|
||||||
|
# "dinov2_{:}14".format(encoder),
|
||||||
|
# )
|
||||||
|
|
||||||
|
self.pretrained = torch.hub.load(
|
||||||
|
"facebookresearch/dinov2",
|
||||||
|
"dinov2_{:}14".format(encoder),
|
||||||
|
)
|
||||||
|
|
||||||
|
dim = self.pretrained.blocks[0].attn.qkv.in_features
|
||||||
|
|
||||||
|
self.depth_head = DPTHead(1, dim, features, out_channels=out_channels, use_bn=use_bn, use_clstoken=use_clstoken)
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
h, w = x.shape[-2:]
|
||||||
|
|
||||||
|
features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)
|
||||||
|
|
||||||
|
patch_h, patch_w = h // 14, w // 14
|
||||||
|
|
||||||
|
depth = self.depth_head(features, patch_h, patch_w)
|
||||||
|
depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
|
||||||
|
depth = F.relu(depth)
|
||||||
|
|
||||||
|
return depth.squeeze(1)
|
227
invokeai/backend/image_util/depth_anything/utilities/util.py
Normal file
227
invokeai/backend/image_util/depth_anything/utilities/util.py
Normal file
@ -0,0 +1,227 @@
|
|||||||
|
import math
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import torch.nn.functional as F
|
||||||
|
|
||||||
|
|
||||||
|
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
|
||||||
|
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sample (dict): sample
|
||||||
|
size (tuple): image size
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: new size
|
||||||
|
"""
|
||||||
|
shape = list(sample["disparity"].shape)
|
||||||
|
|
||||||
|
if shape[0] >= size[0] and shape[1] >= size[1]:
|
||||||
|
return sample
|
||||||
|
|
||||||
|
scale = [0, 0]
|
||||||
|
scale[0] = size[0] / shape[0]
|
||||||
|
scale[1] = size[1] / shape[1]
|
||||||
|
|
||||||
|
scale = max(scale)
|
||||||
|
|
||||||
|
shape[0] = math.ceil(scale * shape[0])
|
||||||
|
shape[1] = math.ceil(scale * shape[1])
|
||||||
|
|
||||||
|
# resize
|
||||||
|
sample["image"] = cv2.resize(sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method)
|
||||||
|
|
||||||
|
sample["disparity"] = cv2.resize(sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)
|
||||||
|
sample["mask"] = cv2.resize(
|
||||||
|
sample["mask"].astype(np.float32),
|
||||||
|
tuple(shape[::-1]),
|
||||||
|
interpolation=cv2.INTER_NEAREST,
|
||||||
|
)
|
||||||
|
sample["mask"] = sample["mask"].astype(bool)
|
||||||
|
|
||||||
|
return tuple(shape)
|
||||||
|
|
||||||
|
|
||||||
|
class Resize(object):
|
||||||
|
"""Resize sample to given size (width, height)."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
resize_target=True,
|
||||||
|
keep_aspect_ratio=False,
|
||||||
|
ensure_multiple_of=1,
|
||||||
|
resize_method="lower_bound",
|
||||||
|
image_interpolation_method=cv2.INTER_AREA,
|
||||||
|
):
|
||||||
|
"""Init.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
width (int): desired output width
|
||||||
|
height (int): desired output height
|
||||||
|
resize_target (bool, optional):
|
||||||
|
True: Resize the full sample (image, mask, target).
|
||||||
|
False: Resize image only.
|
||||||
|
Defaults to True.
|
||||||
|
keep_aspect_ratio (bool, optional):
|
||||||
|
True: Keep the aspect ratio of the input sample.
|
||||||
|
Output sample might not have the given width and height, and
|
||||||
|
resize behaviour depends on the parameter 'resize_method'.
|
||||||
|
Defaults to False.
|
||||||
|
ensure_multiple_of (int, optional):
|
||||||
|
Output width and height is constrained to be multiple of this parameter.
|
||||||
|
Defaults to 1.
|
||||||
|
resize_method (str, optional):
|
||||||
|
"lower_bound": Output will be at least as large as the given size.
|
||||||
|
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller
|
||||||
|
than given size.)
|
||||||
|
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
|
||||||
|
Defaults to "lower_bound".
|
||||||
|
"""
|
||||||
|
self.__width = width
|
||||||
|
self.__height = height
|
||||||
|
|
||||||
|
self.__resize_target = resize_target
|
||||||
|
self.__keep_aspect_ratio = keep_aspect_ratio
|
||||||
|
self.__multiple_of = ensure_multiple_of
|
||||||
|
self.__resize_method = resize_method
|
||||||
|
self.__image_interpolation_method = image_interpolation_method
|
||||||
|
|
||||||
|
def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
|
||||||
|
y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||||
|
|
||||||
|
if max_val is not None and y > max_val:
|
||||||
|
y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||||
|
|
||||||
|
if y < min_val:
|
||||||
|
y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||||
|
|
||||||
|
return y
|
||||||
|
|
||||||
|
def get_size(self, width, height):
|
||||||
|
# determine new height and width
|
||||||
|
scale_height = self.__height / height
|
||||||
|
scale_width = self.__width / width
|
||||||
|
|
||||||
|
if self.__keep_aspect_ratio:
|
||||||
|
if self.__resize_method == "lower_bound":
|
||||||
|
# scale such that output size is lower bound
|
||||||
|
if scale_width > scale_height:
|
||||||
|
# fit width
|
||||||
|
scale_height = scale_width
|
||||||
|
else:
|
||||||
|
# fit height
|
||||||
|
scale_width = scale_height
|
||||||
|
elif self.__resize_method == "upper_bound":
|
||||||
|
# scale such that output size is upper bound
|
||||||
|
if scale_width < scale_height:
|
||||||
|
# fit width
|
||||||
|
scale_height = scale_width
|
||||||
|
else:
|
||||||
|
# fit height
|
||||||
|
scale_width = scale_height
|
||||||
|
elif self.__resize_method == "minimal":
|
||||||
|
# scale as least as possbile
|
||||||
|
if abs(1 - scale_width) < abs(1 - scale_height):
|
||||||
|
# fit width
|
||||||
|
scale_height = scale_width
|
||||||
|
else:
|
||||||
|
# fit height
|
||||||
|
scale_width = scale_height
|
||||||
|
else:
|
||||||
|
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||||
|
|
||||||
|
if self.__resize_method == "lower_bound":
|
||||||
|
new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
|
||||||
|
new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
|
||||||
|
elif self.__resize_method == "upper_bound":
|
||||||
|
new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
|
||||||
|
new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
|
||||||
|
elif self.__resize_method == "minimal":
|
||||||
|
new_height = self.constrain_to_multiple_of(scale_height * height)
|
||||||
|
new_width = self.constrain_to_multiple_of(scale_width * width)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||||
|
|
||||||
|
return (new_width, new_height)
|
||||||
|
|
||||||
|
def __call__(self, sample):
|
||||||
|
width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])
|
||||||
|
|
||||||
|
# resize sample
|
||||||
|
sample["image"] = cv2.resize(
|
||||||
|
sample["image"],
|
||||||
|
(width, height),
|
||||||
|
interpolation=self.__image_interpolation_method,
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.__resize_target:
|
||||||
|
if "disparity" in sample:
|
||||||
|
sample["disparity"] = cv2.resize(
|
||||||
|
sample["disparity"],
|
||||||
|
(width, height),
|
||||||
|
interpolation=cv2.INTER_NEAREST,
|
||||||
|
)
|
||||||
|
|
||||||
|
if "depth" in sample:
|
||||||
|
sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)
|
||||||
|
|
||||||
|
if "semseg_mask" in sample:
|
||||||
|
# sample["semseg_mask"] = cv2.resize(
|
||||||
|
# sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
|
||||||
|
# )
|
||||||
|
sample["semseg_mask"] = F.interpolate(
|
||||||
|
torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode="nearest"
|
||||||
|
).numpy()[0, 0]
|
||||||
|
|
||||||
|
if "mask" in sample:
|
||||||
|
sample["mask"] = cv2.resize(
|
||||||
|
sample["mask"].astype(np.float32),
|
||||||
|
(width, height),
|
||||||
|
interpolation=cv2.INTER_NEAREST,
|
||||||
|
)
|
||||||
|
# sample["mask"] = sample["mask"].astype(bool)
|
||||||
|
|
||||||
|
# print(sample['image'].shape, sample['depth'].shape)
|
||||||
|
return sample
|
||||||
|
|
||||||
|
|
||||||
|
class NormalizeImage(object):
|
||||||
|
"""Normlize image by given mean and std."""
|
||||||
|
|
||||||
|
def __init__(self, mean, std):
|
||||||
|
self.__mean = mean
|
||||||
|
self.__std = std
|
||||||
|
|
||||||
|
def __call__(self, sample):
|
||||||
|
sample["image"] = (sample["image"] - self.__mean) / self.__std
|
||||||
|
|
||||||
|
return sample
|
||||||
|
|
||||||
|
|
||||||
|
class PrepareForNet(object):
|
||||||
|
"""Prepare sample for usage as network input."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def __call__(self, sample):
|
||||||
|
image = np.transpose(sample["image"], (2, 0, 1))
|
||||||
|
sample["image"] = np.ascontiguousarray(image).astype(np.float32)
|
||||||
|
|
||||||
|
if "mask" in sample:
|
||||||
|
sample["mask"] = sample["mask"].astype(np.float32)
|
||||||
|
sample["mask"] = np.ascontiguousarray(sample["mask"])
|
||||||
|
|
||||||
|
if "depth" in sample:
|
||||||
|
depth = sample["depth"].astype(np.float32)
|
||||||
|
sample["depth"] = np.ascontiguousarray(depth)
|
||||||
|
|
||||||
|
if "semseg_mask" in sample:
|
||||||
|
sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
|
||||||
|
sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
|
||||||
|
|
||||||
|
return sample
|
281
invokeai/backend/install/install_helper.py
Normal file
281
invokeai/backend/install/install_helper.py
Normal file
@ -0,0 +1,281 @@
|
|||||||
|
"""Utility (backend) functions used by model_install.py"""
|
||||||
|
import re
|
||||||
|
from logging import Logger
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
import omegaconf
|
||||||
|
from huggingface_hub import HfFolder
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
from pydantic.dataclasses import dataclass
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests import HTTPError
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
import invokeai.configs as configs
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.download import DownloadQueueService
|
||||||
|
from invokeai.app.services.events.events_base import EventServiceBase
|
||||||
|
from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
|
||||||
|
from invokeai.app.services.model_install import (
|
||||||
|
HFModelSource,
|
||||||
|
LocalModelSource,
|
||||||
|
ModelInstallService,
|
||||||
|
ModelInstallServiceBase,
|
||||||
|
ModelSource,
|
||||||
|
URLModelSource,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL
|
||||||
|
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
||||||
|
from invokeai.backend.model_manager import (
|
||||||
|
BaseModelType,
|
||||||
|
InvalidModelConfigException,
|
||||||
|
ModelType,
|
||||||
|
)
|
||||||
|
from invokeai.backend.model_manager.metadata import UnknownMetadataException
|
||||||
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
|
||||||
|
# name of the starter models file
|
||||||
|
INITIAL_MODELS = "INITIAL_MODELS2.yaml"
|
||||||
|
|
||||||
|
|
||||||
|
def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordServiceBase:
|
||||||
|
"""Return an initialized ModelConfigRecordServiceBase object."""
|
||||||
|
logger = InvokeAILogger.get_logger(config=app_config)
|
||||||
|
image_files = DiskImageFileStorage(f"{app_config.output_path}/images")
|
||||||
|
db = init_db(config=app_config, logger=logger, image_files=image_files)
|
||||||
|
obj: ModelRecordServiceBase = ModelRecordServiceSQL(db)
|
||||||
|
return obj
|
||||||
|
|
||||||
|
|
||||||
|
def initialize_installer(
|
||||||
|
app_config: InvokeAIAppConfig, event_bus: Optional[EventServiceBase] = None
|
||||||
|
) -> ModelInstallServiceBase:
|
||||||
|
"""Return an initialized ModelInstallService object."""
|
||||||
|
record_store = initialize_record_store(app_config)
|
||||||
|
metadata_store = record_store.metadata_store
|
||||||
|
download_queue = DownloadQueueService()
|
||||||
|
installer = ModelInstallService(
|
||||||
|
app_config=app_config,
|
||||||
|
record_store=record_store,
|
||||||
|
metadata_store=metadata_store,
|
||||||
|
download_queue=download_queue,
|
||||||
|
event_bus=event_bus,
|
||||||
|
)
|
||||||
|
download_queue.start()
|
||||||
|
installer.start()
|
||||||
|
return installer
|
||||||
|
|
||||||
|
|
||||||
|
class UnifiedModelInfo(BaseModel):
|
||||||
|
"""Catchall class for information in INITIAL_MODELS2.yaml."""
|
||||||
|
|
||||||
|
name: Optional[str] = None
|
||||||
|
base: Optional[BaseModelType] = None
|
||||||
|
type: Optional[ModelType] = None
|
||||||
|
source: Optional[str] = None
|
||||||
|
subfolder: Optional[str] = None
|
||||||
|
description: Optional[str] = None
|
||||||
|
recommended: bool = False
|
||||||
|
installed: bool = False
|
||||||
|
default: bool = False
|
||||||
|
requires: List[str] = Field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class InstallSelections:
|
||||||
|
"""Lists of models to install and remove."""
|
||||||
|
|
||||||
|
install_models: List[UnifiedModelInfo] = Field(default_factory=list)
|
||||||
|
remove_models: List[str] = Field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
class TqdmEventService(EventServiceBase):
|
||||||
|
"""An event service to track downloads."""
|
||||||
|
|
||||||
|
def __init__(self) -> None:
|
||||||
|
"""Create a new TqdmEventService object."""
|
||||||
|
super().__init__()
|
||||||
|
self._bars: Dict[str, tqdm] = {}
|
||||||
|
self._last: Dict[str, int] = {}
|
||||||
|
|
||||||
|
def dispatch(self, event_name: str, payload: Any) -> None:
|
||||||
|
"""Dispatch an event by appending it to self.events."""
|
||||||
|
if payload["event"] == "model_install_downloading":
|
||||||
|
data = payload["data"]
|
||||||
|
dest = data["local_path"]
|
||||||
|
total_bytes = data["total_bytes"]
|
||||||
|
bytes = data["bytes"]
|
||||||
|
if dest not in self._bars:
|
||||||
|
self._bars[dest] = tqdm(desc=Path(dest).name, initial=0, total=total_bytes, unit="iB", unit_scale=True)
|
||||||
|
self._last[dest] = 0
|
||||||
|
self._bars[dest].update(bytes - self._last[dest])
|
||||||
|
self._last[dest] = bytes
|
||||||
|
|
||||||
|
|
||||||
|
class InstallHelper(object):
|
||||||
|
"""Capture information stored jointly in INITIAL_MODELS.yaml and the installed models db."""
|
||||||
|
|
||||||
|
def __init__(self, app_config: InvokeAIAppConfig, logger: Logger):
|
||||||
|
"""Create new InstallHelper object."""
|
||||||
|
self._app_config = app_config
|
||||||
|
self.all_models: Dict[str, UnifiedModelInfo] = {}
|
||||||
|
|
||||||
|
omega = omegaconf.OmegaConf.load(Path(configs.__path__[0]) / INITIAL_MODELS)
|
||||||
|
assert isinstance(omega, omegaconf.dictconfig.DictConfig)
|
||||||
|
|
||||||
|
self._installer = initialize_installer(app_config, TqdmEventService())
|
||||||
|
self._initial_models = omega
|
||||||
|
self._installed_models: List[str] = []
|
||||||
|
self._starter_models: List[str] = []
|
||||||
|
self._default_model: Optional[str] = None
|
||||||
|
self._logger = logger
|
||||||
|
self._initialize_model_lists()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def installer(self) -> ModelInstallServiceBase:
|
||||||
|
"""Return the installer object used internally."""
|
||||||
|
return self._installer
|
||||||
|
|
||||||
|
def _initialize_model_lists(self) -> None:
|
||||||
|
"""
|
||||||
|
Initialize our model slots.
|
||||||
|
|
||||||
|
Set up the following:
|
||||||
|
installed_models -- list of installed model keys
|
||||||
|
starter_models -- list of starter model keys from INITIAL_MODELS
|
||||||
|
all_models -- dict of key => UnifiedModelInfo
|
||||||
|
default_model -- key to default model
|
||||||
|
"""
|
||||||
|
# previously-installed models
|
||||||
|
for model in self._installer.record_store.all_models():
|
||||||
|
info = UnifiedModelInfo.parse_obj(model.dict())
|
||||||
|
info.installed = True
|
||||||
|
model_key = f"{model.base.value}/{model.type.value}/{model.name}"
|
||||||
|
self.all_models[model_key] = info
|
||||||
|
self._installed_models.append(model_key)
|
||||||
|
|
||||||
|
for key in self._initial_models.keys():
|
||||||
|
assert isinstance(key, str)
|
||||||
|
if key in self.all_models:
|
||||||
|
# we want to preserve the description
|
||||||
|
description = self.all_models[key].description or self._initial_models[key].get("description")
|
||||||
|
self.all_models[key].description = description
|
||||||
|
else:
|
||||||
|
base_model, model_type, model_name = key.split("/")
|
||||||
|
info = UnifiedModelInfo(
|
||||||
|
name=model_name,
|
||||||
|
type=ModelType(model_type),
|
||||||
|
base=BaseModelType(base_model),
|
||||||
|
source=self._initial_models[key].source,
|
||||||
|
description=self._initial_models[key].get("description"),
|
||||||
|
recommended=self._initial_models[key].get("recommended", False),
|
||||||
|
default=self._initial_models[key].get("default", False),
|
||||||
|
subfolder=self._initial_models[key].get("subfolder"),
|
||||||
|
requires=list(self._initial_models[key].get("requires", [])),
|
||||||
|
)
|
||||||
|
self.all_models[key] = info
|
||||||
|
if not self.default_model():
|
||||||
|
self._default_model = key
|
||||||
|
elif self._initial_models[key].get("default", False):
|
||||||
|
self._default_model = key
|
||||||
|
self._starter_models.append(key)
|
||||||
|
|
||||||
|
# previously-installed models
|
||||||
|
for model in self._installer.record_store.all_models():
|
||||||
|
info = UnifiedModelInfo.parse_obj(model.dict())
|
||||||
|
info.installed = True
|
||||||
|
model_key = f"{model.base.value}/{model.type.value}/{model.name}"
|
||||||
|
self.all_models[model_key] = info
|
||||||
|
self._installed_models.append(model_key)
|
||||||
|
|
||||||
|
def recommended_models(self) -> List[UnifiedModelInfo]:
|
||||||
|
"""List of the models recommended in INITIAL_MODELS.yaml."""
|
||||||
|
return [self._to_model(x) for x in self._starter_models if self._to_model(x).recommended]
|
||||||
|
|
||||||
|
def installed_models(self) -> List[UnifiedModelInfo]:
|
||||||
|
"""List of models already installed."""
|
||||||
|
return [self._to_model(x) for x in self._installed_models]
|
||||||
|
|
||||||
|
def starter_models(self) -> List[UnifiedModelInfo]:
|
||||||
|
"""List of starter models."""
|
||||||
|
return [self._to_model(x) for x in self._starter_models]
|
||||||
|
|
||||||
|
def default_model(self) -> Optional[UnifiedModelInfo]:
|
||||||
|
"""Return the default model."""
|
||||||
|
return self._to_model(self._default_model) if self._default_model else None
|
||||||
|
|
||||||
|
def _to_model(self, key: str) -> UnifiedModelInfo:
|
||||||
|
return self.all_models[key]
|
||||||
|
|
||||||
|
def _add_required_models(self, model_list: List[UnifiedModelInfo]) -> None:
|
||||||
|
installed = {x.source for x in self.installed_models()}
|
||||||
|
reverse_source = {x.source: x for x in self.all_models.values()}
|
||||||
|
additional_models: List[UnifiedModelInfo] = []
|
||||||
|
for model_info in model_list:
|
||||||
|
for requirement in model_info.requires:
|
||||||
|
if requirement not in installed and reverse_source.get(requirement):
|
||||||
|
additional_models.append(reverse_source[requirement])
|
||||||
|
model_list.extend(additional_models)
|
||||||
|
|
||||||
|
def _make_install_source(self, model_info: UnifiedModelInfo) -> ModelSource:
|
||||||
|
assert model_info.source
|
||||||
|
model_path_id_or_url = model_info.source.strip("\"' ")
|
||||||
|
model_path = Path(model_path_id_or_url)
|
||||||
|
|
||||||
|
if model_path.exists(): # local file on disk
|
||||||
|
return LocalModelSource(path=model_path.absolute(), inplace=True)
|
||||||
|
if re.match(r"^[^/]+/[^/]+$", model_path_id_or_url): # hugging face repo_id
|
||||||
|
return HFModelSource(
|
||||||
|
repo_id=model_path_id_or_url,
|
||||||
|
access_token=HfFolder.get_token(),
|
||||||
|
subfolder=model_info.subfolder,
|
||||||
|
)
|
||||||
|
if re.match(r"^(http|https):", model_path_id_or_url):
|
||||||
|
return URLModelSource(url=AnyHttpUrl(model_path_id_or_url))
|
||||||
|
raise ValueError(f"Unsupported model source: {model_path_id_or_url}")
|
||||||
|
|
||||||
|
def add_or_delete(self, selections: InstallSelections) -> None:
|
||||||
|
"""Add or delete selected models."""
|
||||||
|
installer = self._installer
|
||||||
|
self._add_required_models(selections.install_models)
|
||||||
|
for model in selections.install_models:
|
||||||
|
source = self._make_install_source(model)
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
"description": model.description,
|
||||||
|
"name": model.name,
|
||||||
|
}
|
||||||
|
if model.name
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
installer.import_model(
|
||||||
|
source=source,
|
||||||
|
config=config,
|
||||||
|
)
|
||||||
|
except (UnknownMetadataException, InvalidModelConfigException, HTTPError, OSError) as e:
|
||||||
|
self._logger.warning(f"{source}: {e}")
|
||||||
|
|
||||||
|
for model_to_remove in selections.remove_models:
|
||||||
|
parts = model_to_remove.split("/")
|
||||||
|
if len(parts) == 1:
|
||||||
|
base_model, model_type, model_name = (None, None, model_to_remove)
|
||||||
|
else:
|
||||||
|
base_model, model_type, model_name = parts
|
||||||
|
matches = installer.record_store.search_by_attr(
|
||||||
|
base_model=BaseModelType(base_model) if base_model else None,
|
||||||
|
model_type=ModelType(model_type) if model_type else None,
|
||||||
|
model_name=model_name,
|
||||||
|
)
|
||||||
|
if len(matches) > 1:
|
||||||
|
print(f"{model} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate.")
|
||||||
|
elif not matches:
|
||||||
|
print(f"{model}: unknown model")
|
||||||
|
else:
|
||||||
|
for m in matches:
|
||||||
|
print(f"Deleting {m.type}:{m.name}")
|
||||||
|
installer.delete(m.key)
|
||||||
|
|
||||||
|
installer.wait_for_installs()
|
@ -849,7 +849,7 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
# -------------------------------------
|
# -------------------------------------
|
||||||
def main():
|
def main() -> None:
|
||||||
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
|
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--skip-sd-weights",
|
"--skip-sd-weights",
|
||||||
|
@ -104,12 +104,14 @@ class ModelInstall(object):
|
|||||||
prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
|
prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
|
||||||
model_manager: Optional[ModelManager] = None,
|
model_manager: Optional[ModelManager] = None,
|
||||||
access_token: Optional[str] = None,
|
access_token: Optional[str] = None,
|
||||||
|
civitai_api_key: Optional[str] = None,
|
||||||
):
|
):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.mgr = model_manager or ModelManager(config.model_conf_path)
|
self.mgr = model_manager or ModelManager(config.model_conf_path)
|
||||||
self.datasets = OmegaConf.load(Dataset_path)
|
self.datasets = OmegaConf.load(Dataset_path)
|
||||||
self.prediction_helper = prediction_type_helper
|
self.prediction_helper = prediction_type_helper
|
||||||
self.access_token = access_token or HfFolder.get_token()
|
self.access_token = access_token or HfFolder.get_token()
|
||||||
|
self.civitai_api_key = civitai_api_key or config.civitai_api_key
|
||||||
self.reverse_paths = self._reverse_paths(self.datasets)
|
self.reverse_paths = self._reverse_paths(self.datasets)
|
||||||
|
|
||||||
def all_models(self) -> Dict[str, ModelLoadInfo]:
|
def all_models(self) -> Dict[str, ModelLoadInfo]:
|
||||||
@ -326,7 +328,11 @@ class ModelInstall(object):
|
|||||||
|
|
||||||
def _install_url(self, url: str) -> AddModelResult:
|
def _install_url(self, url: str) -> AddModelResult:
|
||||||
with TemporaryDirectory(dir=self.config.models_path) as staging:
|
with TemporaryDirectory(dir=self.config.models_path) as staging:
|
||||||
location = download_with_resume(url, Path(staging))
|
CIVITAI_RE = r".*civitai.com.*"
|
||||||
|
civit_url = re.match(CIVITAI_RE, url, re.IGNORECASE)
|
||||||
|
location = download_with_resume(
|
||||||
|
url, Path(staging), access_token=self.civitai_api_key if civit_url else None
|
||||||
|
)
|
||||||
if not location:
|
if not location:
|
||||||
logger.error(f"Unable to download {url}. Skipping.")
|
logger.error(f"Unable to download {url}. Skipping.")
|
||||||
info = ModelProbe().heuristic_probe(location, self.prediction_helper)
|
info = ModelProbe().heuristic_probe(location, self.prediction_helper)
|
||||||
|
@ -141,7 +141,7 @@ class StableDiffusionXLModel(DiffusersModel):
|
|||||||
version=base_model,
|
version=base_model,
|
||||||
model_config=config,
|
model_config=config,
|
||||||
output_path=output_path,
|
output_path=output_path,
|
||||||
use_safetensors=False, # corrupts sdxl models for some reason
|
use_safetensors=True,
|
||||||
**kwargs,
|
**kwargs,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
|
177
invokeai/backend/model_manager/merge.py
Normal file
177
invokeai/backend/model_manager/merge.py
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
"""
|
||||||
|
invokeai.backend.model_manager.merge exports:
|
||||||
|
merge_diffusion_models() -- combine multiple models by location and return a pipeline object
|
||||||
|
merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml
|
||||||
|
|
||||||
|
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, List, Optional, Set
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from diffusers import AutoPipelineForText2Image
|
||||||
|
from diffusers import logging as dlogging
|
||||||
|
|
||||||
|
from invokeai.app.services.model_install import ModelInstallServiceBase
|
||||||
|
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
|
||||||
|
|
||||||
|
from . import (
|
||||||
|
AnyModelConfig,
|
||||||
|
BaseModelType,
|
||||||
|
ModelType,
|
||||||
|
ModelVariantType,
|
||||||
|
)
|
||||||
|
from .config import MainDiffusersConfig
|
||||||
|
|
||||||
|
|
||||||
|
class MergeInterpolationMethod(str, Enum):
|
||||||
|
WeightedSum = "weighted_sum"
|
||||||
|
Sigmoid = "sigmoid"
|
||||||
|
InvSigmoid = "inv_sigmoid"
|
||||||
|
AddDifference = "add_difference"
|
||||||
|
|
||||||
|
|
||||||
|
class ModelMerger(object):
|
||||||
|
"""Wrapper class for model merge function."""
|
||||||
|
|
||||||
|
def __init__(self, installer: ModelInstallServiceBase):
|
||||||
|
"""
|
||||||
|
Initialize a ModelMerger object.
|
||||||
|
|
||||||
|
:param store: Underlying storage manager for the running process.
|
||||||
|
:param config: InvokeAIAppConfig object (if not provided, default will be selected).
|
||||||
|
"""
|
||||||
|
self._installer = installer
|
||||||
|
|
||||||
|
def merge_diffusion_models(
|
||||||
|
self,
|
||||||
|
model_paths: List[Path],
|
||||||
|
alpha: float = 0.5,
|
||||||
|
interp: Optional[MergeInterpolationMethod] = None,
|
||||||
|
force: bool = False,
|
||||||
|
variant: Optional[str] = None,
|
||||||
|
**kwargs: Any,
|
||||||
|
) -> Any: # pipe.merge is an untyped function.
|
||||||
|
"""
|
||||||
|
:param model_paths: up to three models, designated by their local paths or HuggingFace repo_ids
|
||||||
|
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
|
||||||
|
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
|
||||||
|
:param interp: The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
|
||||||
|
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
|
||||||
|
:param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
|
||||||
|
|
||||||
|
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
|
||||||
|
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
|
||||||
|
"""
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.simplefilter("ignore")
|
||||||
|
verbosity = dlogging.get_verbosity()
|
||||||
|
dlogging.set_verbosity_error()
|
||||||
|
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
|
||||||
|
|
||||||
|
# Note that checkpoint_merger will not work with downloaded HuggingFace fp16 models
|
||||||
|
# until upstream https://github.com/huggingface/diffusers/pull/6670 is merged and released.
|
||||||
|
pipe = AutoPipelineForText2Image.from_pretrained(
|
||||||
|
model_paths[0],
|
||||||
|
custom_pipeline="checkpoint_merger",
|
||||||
|
torch_dtype=dtype,
|
||||||
|
variant=variant,
|
||||||
|
)
|
||||||
|
merged_pipe = pipe.merge(
|
||||||
|
pretrained_model_name_or_path_list=model_paths,
|
||||||
|
alpha=alpha,
|
||||||
|
interp=interp.value if interp else None, # diffusers API treats None as "weighted sum"
|
||||||
|
force=force,
|
||||||
|
torch_dtype=dtype,
|
||||||
|
variant=variant,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
dlogging.set_verbosity(verbosity)
|
||||||
|
return merged_pipe
|
||||||
|
|
||||||
|
def merge_diffusion_models_and_save(
|
||||||
|
self,
|
||||||
|
model_keys: List[str],
|
||||||
|
merged_model_name: str,
|
||||||
|
alpha: float = 0.5,
|
||||||
|
force: bool = False,
|
||||||
|
interp: Optional[MergeInterpolationMethod] = None,
|
||||||
|
merge_dest_directory: Optional[Path] = None,
|
||||||
|
variant: Optional[str] = None,
|
||||||
|
**kwargs: Any,
|
||||||
|
) -> AnyModelConfig:
|
||||||
|
"""
|
||||||
|
:param models: up to three models, designated by their InvokeAI models.yaml model name
|
||||||
|
:param merged_model_name: name for new model
|
||||||
|
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
|
||||||
|
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
|
||||||
|
:param interp: The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None.
|
||||||
|
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C).
|
||||||
|
:param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
|
||||||
|
:param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended)
|
||||||
|
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
|
||||||
|
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
|
||||||
|
"""
|
||||||
|
model_paths: List[Path] = []
|
||||||
|
model_names: List[str] = []
|
||||||
|
config = self._installer.app_config
|
||||||
|
store = self._installer.record_store
|
||||||
|
base_models: Set[BaseModelType] = set()
|
||||||
|
vae = None
|
||||||
|
variant = None if self._installer.app_config.full_precision else "fp16"
|
||||||
|
|
||||||
|
assert (
|
||||||
|
len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
|
||||||
|
), "When merging three models, only the 'add_difference' merge method is supported"
|
||||||
|
|
||||||
|
for key in model_keys:
|
||||||
|
info = store.get_model(key)
|
||||||
|
model_names.append(info.name)
|
||||||
|
assert isinstance(
|
||||||
|
info, MainDiffusersConfig
|
||||||
|
), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
|
||||||
|
assert info.variant == ModelVariantType(
|
||||||
|
"normal"
|
||||||
|
), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
|
||||||
|
|
||||||
|
# pick up the first model's vae
|
||||||
|
if key == model_keys[0]:
|
||||||
|
vae = info.vae
|
||||||
|
|
||||||
|
# tally base models used
|
||||||
|
base_models.add(info.base)
|
||||||
|
model_paths.extend([config.models_path / info.path])
|
||||||
|
|
||||||
|
assert len(base_models) == 1, f"All models to merge must have same base model, but found bases {base_models}"
|
||||||
|
base_model = base_models.pop()
|
||||||
|
|
||||||
|
merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp)
|
||||||
|
merged_pipe = self.merge_diffusion_models(model_paths, alpha, merge_method, force, variant=variant, **kwargs)
|
||||||
|
dump_path = (
|
||||||
|
Path(merge_dest_directory)
|
||||||
|
if merge_dest_directory
|
||||||
|
else config.models_path / base_model.value / ModelType.Main.value
|
||||||
|
)
|
||||||
|
dump_path.mkdir(parents=True, exist_ok=True)
|
||||||
|
dump_path = dump_path / merged_model_name
|
||||||
|
|
||||||
|
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
|
||||||
|
merged_pipe.save_pretrained(dump_path.as_posix(), safe_serialization=True, torch_dtype=dtype, variant=variant)
|
||||||
|
|
||||||
|
# register model and get its unique key
|
||||||
|
key = self._installer.register_path(dump_path)
|
||||||
|
|
||||||
|
# update model's config
|
||||||
|
model_config = self._installer.record_store.get_model(key)
|
||||||
|
model_config.update(
|
||||||
|
{
|
||||||
|
"name": merged_model_name,
|
||||||
|
"description": f"Merge of models {', '.join(model_names)}",
|
||||||
|
"vae": vae,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
self._installer.record_store.update_model(key, model_config)
|
||||||
|
return model_config
|
@ -170,6 +170,8 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase):
|
|||||||
if model_id is None:
|
if model_id is None:
|
||||||
version_url = CIVITAI_VERSION_ENDPOINT + str(version_id)
|
version_url = CIVITAI_VERSION_ENDPOINT + str(version_id)
|
||||||
version = self._requests.get(version_url).json()
|
version = self._requests.get(version_url).json()
|
||||||
|
if error := version.get("error"):
|
||||||
|
raise UnknownMetadataException(error)
|
||||||
model_id = version["modelId"]
|
model_id = version["modelId"]
|
||||||
|
|
||||||
model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)
|
model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)
|
||||||
|
@ -12,7 +12,7 @@ import psutil
|
|||||||
import torch
|
import torch
|
||||||
from compel.cross_attention_control import Arguments
|
from compel.cross_attention_control import Arguments
|
||||||
from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
|
from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
|
||||||
from diffusers.models.unet_2d_condition import UNet2DConditionModel
|
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||||
from torch import nn
|
from torch import nn
|
||||||
|
|
||||||
import invokeai.backend.util.logging as logger
|
import invokeai.backend.util.logging as logger
|
||||||
|
@ -11,6 +11,7 @@ import logging
|
|||||||
import math
|
import math
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
|
from argparse import Namespace
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
@ -30,8 +31,6 @@ from diffusers.optimization import get_scheduler
|
|||||||
from diffusers.utils import check_min_version
|
from diffusers.utils import check_min_version
|
||||||
from diffusers.utils.import_utils import is_xformers_available
|
from diffusers.utils.import_utils import is_xformers_available
|
||||||
from huggingface_hub import HfFolder, Repository, whoami
|
from huggingface_hub import HfFolder, Repository, whoami
|
||||||
|
|
||||||
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
|
|
||||||
from packaging import version
|
from packaging import version
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
from torch.utils.data import Dataset
|
from torch.utils.data import Dataset
|
||||||
@ -41,8 +40,8 @@ from transformers import CLIPTextModel, CLIPTokenizer
|
|||||||
|
|
||||||
# invokeai stuff
|
# invokeai stuff
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
|
from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
|
||||||
from invokeai.app.services.model_manager import ModelManagerService
|
from invokeai.backend.install.install_helper import initialize_record_store
|
||||||
from invokeai.backend.model_management.models import SubModelType
|
from invokeai.backend.model_manager import BaseModelType, ModelType
|
||||||
|
|
||||||
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
||||||
PIL_INTERPOLATION = {
|
PIL_INTERPOLATION = {
|
||||||
@ -77,7 +76,7 @@ def save_progress(text_encoder, placeholder_token_id, accelerator, placeholder_t
|
|||||||
torch.save(learned_embeds_dict, save_path)
|
torch.save(learned_embeds_dict, save_path)
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
|
def parse_args() -> Namespace:
|
||||||
config = InvokeAIAppConfig.get_config()
|
config = InvokeAIAppConfig.get_config()
|
||||||
parser = PagingArgumentParser(description="Textual inversion training")
|
parser = PagingArgumentParser(description="Textual inversion training")
|
||||||
general_group = parser.add_argument_group("General")
|
general_group = parser.add_argument_group("General")
|
||||||
@ -444,7 +443,7 @@ class TextualInversionDataset(Dataset):
|
|||||||
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
|
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
|
||||||
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
|
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
|
||||||
|
|
||||||
def __len__(self):
|
def __len__(self) -> int:
|
||||||
return self._length
|
return self._length
|
||||||
|
|
||||||
def __getitem__(self, i):
|
def __getitem__(self, i):
|
||||||
@ -509,11 +508,10 @@ def do_textual_inversion_training(
|
|||||||
initializer_token: str,
|
initializer_token: str,
|
||||||
save_steps: int = 500,
|
save_steps: int = 500,
|
||||||
only_save_embeds: bool = False,
|
only_save_embeds: bool = False,
|
||||||
revision: str = None,
|
tokenizer_name: Optional[str] = None,
|
||||||
tokenizer_name: str = None,
|
|
||||||
learnable_property: str = "object",
|
learnable_property: str = "object",
|
||||||
repeats: int = 100,
|
repeats: int = 100,
|
||||||
seed: int = None,
|
seed: Optional[int] = None,
|
||||||
resolution: int = 512,
|
resolution: int = 512,
|
||||||
center_crop: bool = False,
|
center_crop: bool = False,
|
||||||
train_batch_size: int = 16,
|
train_batch_size: int = 16,
|
||||||
@ -530,18 +528,18 @@ def do_textual_inversion_training(
|
|||||||
adam_weight_decay: float = 1e-02,
|
adam_weight_decay: float = 1e-02,
|
||||||
adam_epsilon: float = 1e-08,
|
adam_epsilon: float = 1e-08,
|
||||||
push_to_hub: bool = False,
|
push_to_hub: bool = False,
|
||||||
hub_token: str = None,
|
hub_token: Optional[str] = None,
|
||||||
logging_dir: Path = Path("logs"),
|
logging_dir: Path = Path("logs"),
|
||||||
mixed_precision: str = "fp16",
|
mixed_precision: str = "fp16",
|
||||||
allow_tf32: bool = False,
|
allow_tf32: bool = False,
|
||||||
report_to: str = "tensorboard",
|
report_to: str = "tensorboard",
|
||||||
local_rank: int = -1,
|
local_rank: int = -1,
|
||||||
checkpointing_steps: int = 500,
|
checkpointing_steps: int = 500,
|
||||||
resume_from_checkpoint: Path = None,
|
resume_from_checkpoint: Optional[Path] = None,
|
||||||
enable_xformers_memory_efficient_attention: bool = False,
|
enable_xformers_memory_efficient_attention: bool = False,
|
||||||
hub_model_id: str = None,
|
hub_model_id: Optional[str] = None,
|
||||||
**kwargs,
|
**kwargs,
|
||||||
):
|
) -> None:
|
||||||
assert model, "Please specify a base model with --model"
|
assert model, "Please specify a base model with --model"
|
||||||
assert train_data_dir, "Please specify a directory containing the training images using --train_data_dir"
|
assert train_data_dir, "Please specify a directory containing the training images using --train_data_dir"
|
||||||
assert placeholder_token, "Please specify a trigger term using --placeholder_token"
|
assert placeholder_token, "Please specify a trigger term using --placeholder_token"
|
||||||
@ -564,8 +562,6 @@ def do_textual_inversion_training(
|
|||||||
project_config=accelerator_config,
|
project_config=accelerator_config,
|
||||||
)
|
)
|
||||||
|
|
||||||
model_manager = ModelManagerService(config, logger)
|
|
||||||
|
|
||||||
# Make one log on every process with the configuration for debugging.
|
# Make one log on every process with the configuration for debugging.
|
||||||
logging.basicConfig(
|
logging.basicConfig(
|
||||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||||
@ -603,44 +599,37 @@ def do_textual_inversion_training(
|
|||||||
elif output_dir is not None:
|
elif output_dir is not None:
|
||||||
os.makedirs(output_dir, exist_ok=True)
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
|
|
||||||
known_models = model_manager.model_names()
|
model_records = initialize_record_store(config)
|
||||||
model_name = model.split("/")[-1]
|
base, type, name = model.split("/") # note frontend still returns old-style keys
|
||||||
model_meta = next((mm for mm in known_models if mm[0].endswith(model_name)), None)
|
try:
|
||||||
assert model_meta is not None, f"Unknown model: {model}"
|
model_config = model_records.search_by_attr(
|
||||||
model_info = model_manager.model_info(*model_meta)
|
model_name=name, model_type=ModelType(type), base_model=BaseModelType(base)
|
||||||
assert model_info["model_format"] == "diffusers", "This script only works with models of type 'diffusers'"
|
)[0]
|
||||||
tokenizer_info = model_manager.get_model(*model_meta, submodel=SubModelType.Tokenizer)
|
except IndexError:
|
||||||
noise_scheduler_info = model_manager.get_model(*model_meta, submodel=SubModelType.Scheduler)
|
raise Exception(f"Unknown model {model}")
|
||||||
text_encoder_info = model_manager.get_model(*model_meta, submodel=SubModelType.TextEncoder)
|
model_path = config.models_path / model_config.path
|
||||||
vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
|
|
||||||
unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)
|
|
||||||
|
|
||||||
pipeline_args = {"local_files_only": True}
|
pipeline_args = {"local_files_only": True}
|
||||||
if tokenizer_name:
|
if tokenizer_name:
|
||||||
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
|
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
|
||||||
else:
|
else:
|
||||||
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_info.location, subfolder="tokenizer", **pipeline_args)
|
tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer", **pipeline_args)
|
||||||
|
|
||||||
# Load scheduler and models
|
# Load scheduler and models
|
||||||
noise_scheduler = DDPMScheduler.from_pretrained(
|
noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler", **pipeline_args)
|
||||||
noise_scheduler_info.location, subfolder="scheduler", **pipeline_args
|
|
||||||
)
|
|
||||||
text_encoder = CLIPTextModel.from_pretrained(
|
text_encoder = CLIPTextModel.from_pretrained(
|
||||||
text_encoder_info.location,
|
model_path,
|
||||||
subfolder="text_encoder",
|
subfolder="text_encoder",
|
||||||
revision=revision,
|
|
||||||
**pipeline_args,
|
**pipeline_args,
|
||||||
)
|
)
|
||||||
vae = AutoencoderKL.from_pretrained(
|
vae = AutoencoderKL.from_pretrained(
|
||||||
vae_info.location,
|
model_path,
|
||||||
subfolder="vae",
|
subfolder="vae",
|
||||||
revision=revision,
|
|
||||||
**pipeline_args,
|
**pipeline_args,
|
||||||
)
|
)
|
||||||
unet = UNet2DConditionModel.from_pretrained(
|
unet = UNet2DConditionModel.from_pretrained(
|
||||||
unet_info.location,
|
model_path,
|
||||||
subfolder="unet",
|
subfolder="unet",
|
||||||
revision=revision,
|
|
||||||
**pipeline_args,
|
**pipeline_args,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -728,7 +717,7 @@ def do_textual_inversion_training(
|
|||||||
max_train_steps = num_train_epochs * num_update_steps_per_epoch
|
max_train_steps = num_train_epochs * num_update_steps_per_epoch
|
||||||
overrode_max_train_steps = True
|
overrode_max_train_steps = True
|
||||||
|
|
||||||
lr_scheduler = get_scheduler(
|
scheduler = get_scheduler(
|
||||||
lr_scheduler,
|
lr_scheduler,
|
||||||
optimizer=optimizer,
|
optimizer=optimizer,
|
||||||
num_warmup_steps=lr_warmup_steps * gradient_accumulation_steps,
|
num_warmup_steps=lr_warmup_steps * gradient_accumulation_steps,
|
||||||
@ -737,7 +726,7 @@ def do_textual_inversion_training(
|
|||||||
|
|
||||||
# Prepare everything with our `accelerator`.
|
# Prepare everything with our `accelerator`.
|
||||||
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||||
text_encoder, optimizer, train_dataloader, lr_scheduler
|
text_encoder, optimizer, train_dataloader, scheduler
|
||||||
)
|
)
|
||||||
|
|
||||||
# For mixed precision training we cast the unet and vae weights to half-precision
|
# For mixed precision training we cast the unet and vae weights to half-precision
|
||||||
@ -863,7 +852,7 @@ def do_textual_inversion_training(
|
|||||||
accelerator.backward(loss)
|
accelerator.backward(loss)
|
||||||
|
|
||||||
optimizer.step()
|
optimizer.step()
|
||||||
lr_scheduler.step()
|
scheduler.step()
|
||||||
optimizer.zero_grad()
|
optimizer.zero_grad()
|
||||||
|
|
||||||
# Let's make sure we don't update any embedding weights besides the newly added token
|
# Let's make sure we don't update any embedding weights besides the newly added token
|
||||||
@ -893,7 +882,7 @@ def do_textual_inversion_training(
|
|||||||
accelerator.save_state(save_path)
|
accelerator.save_state(save_path)
|
||||||
logger.info(f"Saved state to {save_path}")
|
logger.info(f"Saved state to {save_path}")
|
||||||
|
|
||||||
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
logs = {"loss": loss.detach().item(), "lr": scheduler.get_last_lr()[0]}
|
||||||
progress_bar.set_postfix(**logs)
|
progress_bar.set_postfix(**logs)
|
||||||
accelerator.log(logs, step=global_step)
|
accelerator.log(logs, step=global_step)
|
||||||
|
|
||||||
@ -910,7 +899,7 @@ def do_textual_inversion_training(
|
|||||||
save_full_model = not only_save_embeds
|
save_full_model = not only_save_embeds
|
||||||
if save_full_model:
|
if save_full_model:
|
||||||
pipeline = StableDiffusionPipeline.from_pretrained(
|
pipeline = StableDiffusionPipeline.from_pretrained(
|
||||||
unet_info.location,
|
model_path,
|
||||||
text_encoder=accelerator.unwrap_model(text_encoder),
|
text_encoder=accelerator.unwrap_model(text_encoder),
|
||||||
vae=vae,
|
vae=vae,
|
||||||
unet=unet,
|
unet=unet,
|
||||||
|
@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
|
|||||||
import diffusers
|
import diffusers
|
||||||
import torch
|
import torch
|
||||||
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
||||||
from diffusers.loaders import FromOriginalControlnetMixin
|
from diffusers.loaders import FromOriginalControlNetMixin
|
||||||
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
|
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
|
||||||
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
|
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
|
||||||
from diffusers.models.embeddings import (
|
from diffusers.models.embeddings import (
|
||||||
@ -14,8 +14,13 @@ from diffusers.models.embeddings import (
|
|||||||
Timesteps,
|
Timesteps,
|
||||||
)
|
)
|
||||||
from diffusers.models.modeling_utils import ModelMixin
|
from diffusers.models.modeling_utils import ModelMixin
|
||||||
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, get_down_block
|
from diffusers.models.unets.unet_2d_blocks import (
|
||||||
from diffusers.models.unet_2d_condition import UNet2DConditionModel
|
CrossAttnDownBlock2D,
|
||||||
|
DownBlock2D,
|
||||||
|
UNetMidBlock2DCrossAttn,
|
||||||
|
get_down_block,
|
||||||
|
)
|
||||||
|
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||||
from torch import nn
|
from torch import nn
|
||||||
|
|
||||||
from invokeai.backend.util.logging import InvokeAILogger
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
@ -27,7 +32,7 @@ from invokeai.backend.util.logging import InvokeAILogger
|
|||||||
logger = InvokeAILogger.get_logger(__name__)
|
logger = InvokeAILogger.get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
|
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
|
||||||
"""
|
"""
|
||||||
A ControlNet model.
|
A ControlNet model.
|
||||||
|
|
||||||
|
@ -286,7 +286,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
|
|||||||
open_mode = "wb"
|
open_mode = "wb"
|
||||||
exist_size = 0
|
exist_size = 0
|
||||||
|
|
||||||
resp = requests.get(url, header, stream=True)
|
resp = requests.get(url, headers=header, stream=True, allow_redirects=True)
|
||||||
content_length = int(resp.headers.get("content-length", 0))
|
content_length = int(resp.headers.get("content-length", 0))
|
||||||
|
|
||||||
if dest.is_dir():
|
if dest.is_dir():
|
||||||
|
157
invokeai/configs/INITIAL_MODELS2.yaml
Normal file
157
invokeai/configs/INITIAL_MODELS2.yaml
Normal file
@ -0,0 +1,157 @@
|
|||||||
|
# This file predefines a few models that the user may want to install.
|
||||||
|
sd-1/main/stable-diffusion-v1-5:
|
||||||
|
description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
|
||||||
|
source: runwayml/stable-diffusion-v1-5
|
||||||
|
recommended: True
|
||||||
|
default: True
|
||||||
|
sd-1/main/stable-diffusion-v1-5-inpainting:
|
||||||
|
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
|
||||||
|
source: runwayml/stable-diffusion-inpainting
|
||||||
|
recommended: True
|
||||||
|
sd-2/main/stable-diffusion-2-1:
|
||||||
|
description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
|
||||||
|
source: stabilityai/stable-diffusion-2-1
|
||||||
|
recommended: False
|
||||||
|
sd-2/main/stable-diffusion-2-inpainting:
|
||||||
|
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
|
||||||
|
source: stabilityai/stable-diffusion-2-inpainting
|
||||||
|
recommended: False
|
||||||
|
sdxl/main/stable-diffusion-xl-base-1-0:
|
||||||
|
description: Stable Diffusion XL base model (12 GB)
|
||||||
|
source: stabilityai/stable-diffusion-xl-base-1.0
|
||||||
|
recommended: True
|
||||||
|
sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
|
||||||
|
description: Stable Diffusion XL refiner model (12 GB)
|
||||||
|
source: stabilityai/stable-diffusion-xl-refiner-1.0
|
||||||
|
recommended: False
|
||||||
|
sdxl/vae/sdxl-vae-fp16-fix:
|
||||||
|
description: Version of the SDXL-1.0 VAE that works in half precision mode
|
||||||
|
source: madebyollin/sdxl-vae-fp16-fix
|
||||||
|
recommended: True
|
||||||
|
sd-1/main/Analog-Diffusion:
|
||||||
|
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
|
||||||
|
source: wavymulder/Analog-Diffusion
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/Deliberate:
|
||||||
|
description: Versatile model that produces detailed images up to 768px (4.27 GB)
|
||||||
|
source: XpucT/Deliberate
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/Dungeons-and-Diffusion:
|
||||||
|
description: Dungeons & Dragons characters (2.13 GB)
|
||||||
|
source: 0xJustin/Dungeons-and-Diffusion
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/dreamlike-photoreal-2:
|
||||||
|
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
|
||||||
|
source: dreamlike-art/dreamlike-photoreal-2.0
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/Inkpunk-Diffusion:
|
||||||
|
description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
|
||||||
|
source: Envvi/Inkpunk-Diffusion
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/openjourney:
|
||||||
|
description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
|
||||||
|
source: prompthero/openjourney
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/seek.art_MEGA:
|
||||||
|
source: coreco/seek.art_MEGA
|
||||||
|
description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
|
||||||
|
recommended: False
|
||||||
|
sd-1/main/trinart_stable_diffusion_v2:
|
||||||
|
description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
|
||||||
|
source: naclbit/trinart_stable_diffusion_v2
|
||||||
|
recommended: False
|
||||||
|
sd-1/controlnet/qrcode_monster:
|
||||||
|
source: monster-labs/control_v1p_sd15_qrcode_monster
|
||||||
|
subfolder: v2
|
||||||
|
sd-1/controlnet/canny:
|
||||||
|
source: lllyasviel/control_v11p_sd15_canny
|
||||||
|
recommended: True
|
||||||
|
sd-1/controlnet/inpaint:
|
||||||
|
source: lllyasviel/control_v11p_sd15_inpaint
|
||||||
|
sd-1/controlnet/mlsd:
|
||||||
|
source: lllyasviel/control_v11p_sd15_mlsd
|
||||||
|
sd-1/controlnet/depth:
|
||||||
|
source: lllyasviel/control_v11f1p_sd15_depth
|
||||||
|
recommended: True
|
||||||
|
sd-1/controlnet/normal_bae:
|
||||||
|
source: lllyasviel/control_v11p_sd15_normalbae
|
||||||
|
sd-1/controlnet/seg:
|
||||||
|
source: lllyasviel/control_v11p_sd15_seg
|
||||||
|
sd-1/controlnet/lineart:
|
||||||
|
source: lllyasviel/control_v11p_sd15_lineart
|
||||||
|
recommended: True
|
||||||
|
sd-1/controlnet/lineart_anime:
|
||||||
|
source: lllyasviel/control_v11p_sd15s2_lineart_anime
|
||||||
|
sd-1/controlnet/openpose:
|
||||||
|
source: lllyasviel/control_v11p_sd15_openpose
|
||||||
|
recommended: True
|
||||||
|
sd-1/controlnet/scribble:
|
||||||
|
source: lllyasviel/control_v11p_sd15_scribble
|
||||||
|
recommended: False
|
||||||
|
sd-1/controlnet/softedge:
|
||||||
|
source: lllyasviel/control_v11p_sd15_softedge
|
||||||
|
sd-1/controlnet/shuffle:
|
||||||
|
source: lllyasviel/control_v11e_sd15_shuffle
|
||||||
|
sd-1/controlnet/tile:
|
||||||
|
source: lllyasviel/control_v11f1e_sd15_tile
|
||||||
|
sd-1/controlnet/ip2p:
|
||||||
|
source: lllyasviel/control_v11e_sd15_ip2p
|
||||||
|
sd-1/t2i_adapter/canny-sd15:
|
||||||
|
source: TencentARC/t2iadapter_canny_sd15v2
|
||||||
|
sd-1/t2i_adapter/sketch-sd15:
|
||||||
|
source: TencentARC/t2iadapter_sketch_sd15v2
|
||||||
|
sd-1/t2i_adapter/depth-sd15:
|
||||||
|
source: TencentARC/t2iadapter_depth_sd15v2
|
||||||
|
sd-1/t2i_adapter/zoedepth-sd15:
|
||||||
|
source: TencentARC/t2iadapter_zoedepth_sd15v1
|
||||||
|
sdxl/t2i_adapter/canny-sdxl:
|
||||||
|
source: TencentARC/t2i-adapter-canny-sdxl-1.0
|
||||||
|
sdxl/t2i_adapter/zoedepth-sdxl:
|
||||||
|
source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
|
||||||
|
sdxl/t2i_adapter/lineart-sdxl:
|
||||||
|
source: TencentARC/t2i-adapter-lineart-sdxl-1.0
|
||||||
|
sdxl/t2i_adapter/sketch-sdxl:
|
||||||
|
source: TencentARC/t2i-adapter-sketch-sdxl-1.0
|
||||||
|
sd-1/embedding/EasyNegative:
|
||||||
|
source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
|
||||||
|
recommended: True
|
||||||
|
description: A textual inversion to use in the negative prompt to reduce bad anatomy
|
||||||
|
sd-1/lora/FlatColor:
|
||||||
|
source: https://civitai.com/models/6433/loraflatcolor
|
||||||
|
recommended: True
|
||||||
|
description: A LoRA that generates scenery using solid blocks of color
|
||||||
|
sd-1/lora/Ink scenery:
|
||||||
|
source: https://civitai.com/api/download/models/83390
|
||||||
|
description: Generate india ink-like landscapes
|
||||||
|
sd-1/ip_adapter/ip_adapter_sd15:
|
||||||
|
source: InvokeAI/ip_adapter_sd15
|
||||||
|
recommended: True
|
||||||
|
requires:
|
||||||
|
- InvokeAI/ip_adapter_sd_image_encoder
|
||||||
|
description: IP-Adapter for SD 1.5 models
|
||||||
|
sd-1/ip_adapter/ip_adapter_plus_sd15:
|
||||||
|
source: InvokeAI/ip_adapter_plus_sd15
|
||||||
|
recommended: False
|
||||||
|
requires:
|
||||||
|
- InvokeAI/ip_adapter_sd_image_encoder
|
||||||
|
description: Refined IP-Adapter for SD 1.5 models
|
||||||
|
sd-1/ip_adapter/ip_adapter_plus_face_sd15:
|
||||||
|
source: InvokeAI/ip_adapter_plus_face_sd15
|
||||||
|
recommended: False
|
||||||
|
requires:
|
||||||
|
- InvokeAI/ip_adapter_sd_image_encoder
|
||||||
|
description: Refined IP-Adapter for SD 1.5 models, adapted for faces
|
||||||
|
sdxl/ip_adapter/ip_adapter_sdxl:
|
||||||
|
source: InvokeAI/ip_adapter_sdxl
|
||||||
|
recommended: False
|
||||||
|
requires:
|
||||||
|
- InvokeAI/ip_adapter_sdxl_image_encoder
|
||||||
|
description: IP-Adapter for SDXL models
|
||||||
|
any/clip_vision/ip_adapter_sd_image_encoder:
|
||||||
|
source: InvokeAI/ip_adapter_sd_image_encoder
|
||||||
|
recommended: False
|
||||||
|
description: Required model for using IP-Adapters with SD-1/2 models
|
||||||
|
any/clip_vision/ip_adapter_sdxl_image_encoder:
|
||||||
|
source: InvokeAI/ip_adapter_sdxl_image_encoder
|
||||||
|
recommended: False
|
||||||
|
description: Required model for using IP-Adapters with SDXL models
|
@ -2,3 +2,5 @@
|
|||||||
Wrapper for invokeai.backend.configure.invokeai_configure
|
Wrapper for invokeai.backend.configure.invokeai_configure
|
||||||
"""
|
"""
|
||||||
from ...backend.install.invokeai_configure import main as invokeai_configure # noqa: F401
|
from ...backend.install.invokeai_configure import main as invokeai_configure # noqa: F401
|
||||||
|
|
||||||
|
__all__ = ["invokeai_configure"]
|
||||||
|
@ -5,14 +5,14 @@ pip install <path_to_git_source>.
|
|||||||
import os
|
import os
|
||||||
import platform
|
import platform
|
||||||
from distutils.version import LooseVersion
|
from distutils.version import LooseVersion
|
||||||
|
from importlib.metadata import PackageNotFoundError, distribution, distributions
|
||||||
|
|
||||||
import pkg_resources
|
|
||||||
import psutil
|
import psutil
|
||||||
import requests
|
import requests
|
||||||
from rich import box, print
|
from rich import box, print
|
||||||
from rich.console import Console, group
|
from rich.console import Console, group
|
||||||
from rich.panel import Panel
|
from rich.panel import Panel
|
||||||
from rich.prompt import Prompt
|
from rich.prompt import Confirm, Prompt
|
||||||
from rich.style import Style
|
from rich.style import Style
|
||||||
|
|
||||||
from invokeai.version import __version__
|
from invokeai.version import __version__
|
||||||
@ -61,6 +61,65 @@ def get_pypi_versions():
|
|||||||
return latest_version, latest_release_candidate, versions
|
return latest_version, latest_release_candidate, versions
|
||||||
|
|
||||||
|
|
||||||
|
def get_torch_extra_index_url() -> str | None:
|
||||||
|
"""
|
||||||
|
Determine torch wheel source URL and optional modules based on the user's OS.
|
||||||
|
"""
|
||||||
|
|
||||||
|
resolved_url = None
|
||||||
|
|
||||||
|
# In all other cases (like MacOS (MPS) or Linux+CUDA), there is no need to specify the extra index URL.
|
||||||
|
torch_package_urls = {
|
||||||
|
"windows_cuda": "https://download.pytorch.org/whl/cu121",
|
||||||
|
"linux_rocm": "https://download.pytorch.org/whl/rocm5.6",
|
||||||
|
"linux_cpu": "https://download.pytorch.org/whl/cpu",
|
||||||
|
}
|
||||||
|
|
||||||
|
nvidia_packages_present = (
|
||||||
|
len([d.metadata["Name"] for d in distributions() if d.metadata["Name"].startswith("nvidia")]) > 0
|
||||||
|
)
|
||||||
|
device = "cuda" if nvidia_packages_present else None
|
||||||
|
manual_gpu_selection_prompt = (
|
||||||
|
"[bold]We tried and failed to guess your GPU capabilities[/] :thinking_face:. Please select the GPU type:"
|
||||||
|
)
|
||||||
|
|
||||||
|
if OS == "Linux":
|
||||||
|
if not device:
|
||||||
|
# do we even need to offer a CPU-only install option?
|
||||||
|
print(manual_gpu_selection_prompt)
|
||||||
|
print("1: NVIDIA (CUDA)")
|
||||||
|
print("2: AMD (ROCm)")
|
||||||
|
print("3: No GPU - CPU only")
|
||||||
|
answer = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
|
||||||
|
match answer:
|
||||||
|
case "1":
|
||||||
|
device = "cuda"
|
||||||
|
case "2":
|
||||||
|
device = "rocm"
|
||||||
|
case "3":
|
||||||
|
device = "cpu"
|
||||||
|
|
||||||
|
if device != "cuda":
|
||||||
|
resolved_url = torch_package_urls[f"linux_{device}"]
|
||||||
|
|
||||||
|
if OS == "Windows":
|
||||||
|
if not device:
|
||||||
|
print(manual_gpu_selection_prompt)
|
||||||
|
print("1: NVIDIA (CUDA)")
|
||||||
|
print("2: No GPU - CPU only")
|
||||||
|
answer = Prompt.ask("Your choice:", choices=["1", "2"], default="1")
|
||||||
|
match answer:
|
||||||
|
case "1":
|
||||||
|
device = "cuda"
|
||||||
|
case "2":
|
||||||
|
device = "cpu"
|
||||||
|
|
||||||
|
if device == "cuda":
|
||||||
|
resolved_url = torch_package_urls[f"windows_{device}"]
|
||||||
|
|
||||||
|
return resolved_url
|
||||||
|
|
||||||
|
|
||||||
def welcome(latest_release: str, latest_prerelease: str):
|
def welcome(latest_release: str, latest_prerelease: str):
|
||||||
@group()
|
@group()
|
||||||
def text():
|
def text():
|
||||||
@ -89,12 +148,11 @@ def welcome(latest_release: str, latest_prerelease: str):
|
|||||||
|
|
||||||
|
|
||||||
def get_extras():
|
def get_extras():
|
||||||
extras = ""
|
|
||||||
try:
|
try:
|
||||||
_ = pkg_resources.get_distribution("xformers")
|
distribution("xformers")
|
||||||
extras = "[xformers]"
|
extras = "[xformers]"
|
||||||
except pkg_resources.DistributionNotFound:
|
except PackageNotFoundError:
|
||||||
pass
|
extras = ""
|
||||||
return extras
|
return extras
|
||||||
|
|
||||||
|
|
||||||
@ -125,8 +183,22 @@ def main():
|
|||||||
|
|
||||||
extras = get_extras()
|
extras = get_extras()
|
||||||
|
|
||||||
|
console.line()
|
||||||
|
force_reinstall = Confirm.ask(
|
||||||
|
"[bold]Force reinstallation of all dependencies?[/] This [i]may[/] help fix a broken upgrade, but is usually not necessary.",
|
||||||
|
default=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
console.line()
|
||||||
|
flags = []
|
||||||
|
if (index_url := get_torch_extra_index_url()) is not None:
|
||||||
|
flags.append(f"--extra-index-url {index_url}")
|
||||||
|
if force_reinstall:
|
||||||
|
flags.append("--force-reinstall")
|
||||||
|
flags = " ".join(flags)
|
||||||
|
|
||||||
print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
|
print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
|
||||||
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade'
|
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade {flags}'
|
||||||
|
|
||||||
print("")
|
print("")
|
||||||
print("")
|
print("")
|
||||||
|
645
invokeai/frontend/install/model_install2.py
Normal file
645
invokeai/frontend/install/model_install2.py
Normal file
@ -0,0 +1,645 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
|
||||||
|
# Before running stable-diffusion on an internet-isolated machine,
|
||||||
|
# run this script from one with internet connectivity. The
|
||||||
|
# two machines must share a common .cache directory.
|
||||||
|
|
||||||
|
"""
|
||||||
|
This is the npyscreen frontend to the model installation application.
|
||||||
|
It is currently named model_install2.py, but will ultimately replace model_install.py.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import curses
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
import warnings
|
||||||
|
from argparse import Namespace
|
||||||
|
from shutil import get_terminal_size
|
||||||
|
from typing import Any, Dict, List, Optional, Set
|
||||||
|
|
||||||
|
import npyscreen
|
||||||
|
import torch
|
||||||
|
from npyscreen import widget
|
||||||
|
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.model_install import ModelInstallService
|
||||||
|
from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
|
||||||
|
from invokeai.backend.model_manager import ModelType
|
||||||
|
from invokeai.backend.util import choose_precision, choose_torch_device
|
||||||
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
from invokeai.frontend.install.widgets import (
|
||||||
|
MIN_COLS,
|
||||||
|
MIN_LINES,
|
||||||
|
CenteredTitleText,
|
||||||
|
CyclingForm,
|
||||||
|
MultiSelectColumns,
|
||||||
|
SingleSelectColumns,
|
||||||
|
TextBox,
|
||||||
|
WindowTooSmallException,
|
||||||
|
set_min_terminal_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
warnings.filterwarnings("ignore", category=UserWarning) # noqa: E402
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
logger = InvokeAILogger.get_logger("ModelInstallService")
|
||||||
|
logger.setLevel("WARNING")
|
||||||
|
# logger.setLevel('DEBUG')
|
||||||
|
|
||||||
|
# build a table mapping all non-printable characters to None
|
||||||
|
# for stripping control characters
|
||||||
|
# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
|
||||||
|
NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()}
|
||||||
|
|
||||||
|
# maximum number of installed models we can display before overflowing vertically
|
||||||
|
MAX_OTHER_MODELS = 72
|
||||||
|
|
||||||
|
|
||||||
|
def make_printable(s: str) -> str:
|
||||||
|
"""Replace non-printable characters in a string."""
|
||||||
|
return s.translate(NOPRINT_TRANS_TABLE)
|
||||||
|
|
||||||
|
|
||||||
|
class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
|
||||||
|
"""Main form for interactive TUI."""
|
||||||
|
|
||||||
|
# for responsive resizing set to False, but this seems to cause a crash!
|
||||||
|
FIX_MINIMUM_SIZE_WHEN_CREATED = True
|
||||||
|
|
||||||
|
# for persistence
|
||||||
|
current_tab = 0
|
||||||
|
|
||||||
|
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any):
|
||||||
|
self.multipage = multipage
|
||||||
|
self.subprocess = None
|
||||||
|
super().__init__(parentApp=parentApp, name=name, **keywords)
|
||||||
|
|
||||||
|
def create(self) -> None:
|
||||||
|
self.installer = self.parentApp.install_helper.installer
|
||||||
|
self.model_labels = self._get_model_labels()
|
||||||
|
self.keypress_timeout = 10
|
||||||
|
self.counter = 0
|
||||||
|
self.subprocess_connection = None
|
||||||
|
|
||||||
|
window_width, window_height = get_terminal_size()
|
||||||
|
|
||||||
|
# npyscreen has no typing hints
|
||||||
|
self.nextrely -= 1 # type: ignore
|
||||||
|
self.add_widget_intelligent(
|
||||||
|
npyscreen.FixedText,
|
||||||
|
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
|
||||||
|
editable=False,
|
||||||
|
color="CAUTION",
|
||||||
|
)
|
||||||
|
self.nextrely += 1 # type: ignore
|
||||||
|
self.tabs = self.add_widget_intelligent(
|
||||||
|
SingleSelectColumns,
|
||||||
|
values=[
|
||||||
|
"STARTERS",
|
||||||
|
"MAINS",
|
||||||
|
"CONTROLNETS",
|
||||||
|
"T2I-ADAPTERS",
|
||||||
|
"IP-ADAPTERS",
|
||||||
|
"LORAS",
|
||||||
|
"TI EMBEDDINGS",
|
||||||
|
],
|
||||||
|
value=[self.current_tab],
|
||||||
|
columns=7,
|
||||||
|
max_height=2,
|
||||||
|
relx=8,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.tabs.on_changed = self._toggle_tables
|
||||||
|
|
||||||
|
top_of_table = self.nextrely # type: ignore
|
||||||
|
self.starter_pipelines = self.add_starter_pipelines()
|
||||||
|
bottom_of_table = self.nextrely # type: ignore
|
||||||
|
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.pipeline_models = self.add_pipeline_widgets(
|
||||||
|
model_type=ModelType.Main, window_width=window_width, exclude=self.starter_models
|
||||||
|
)
|
||||||
|
# self.pipeline_models['autoload_pending'] = True
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.controlnet_models = self.add_model_widgets(
|
||||||
|
model_type=ModelType.ControlNet,
|
||||||
|
window_width=window_width,
|
||||||
|
)
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.t2i_models = self.add_model_widgets(
|
||||||
|
model_type=ModelType.T2IAdapter,
|
||||||
|
window_width=window_width,
|
||||||
|
)
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.ipadapter_models = self.add_model_widgets(
|
||||||
|
model_type=ModelType.IPAdapter,
|
||||||
|
window_width=window_width,
|
||||||
|
)
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.lora_models = self.add_model_widgets(
|
||||||
|
model_type=ModelType.Lora,
|
||||||
|
window_width=window_width,
|
||||||
|
)
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
|
||||||
|
self.nextrely = top_of_table
|
||||||
|
self.ti_models = self.add_model_widgets(
|
||||||
|
model_type=ModelType.TextualInversion,
|
||||||
|
window_width=window_width,
|
||||||
|
)
|
||||||
|
bottom_of_table = max(bottom_of_table, self.nextrely)
|
||||||
|
|
||||||
|
self.nextrely = bottom_of_table + 1
|
||||||
|
|
||||||
|
self.nextrely += 1
|
||||||
|
back_label = "BACK"
|
||||||
|
cancel_label = "CANCEL"
|
||||||
|
current_position = self.nextrely
|
||||||
|
if self.multipage:
|
||||||
|
self.back_button = self.add_widget_intelligent(
|
||||||
|
npyscreen.ButtonPress,
|
||||||
|
name=back_label,
|
||||||
|
when_pressed_function=self.on_back,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.nextrely = current_position
|
||||||
|
self.cancel_button = self.add_widget_intelligent(
|
||||||
|
npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
|
||||||
|
)
|
||||||
|
self.nextrely = current_position
|
||||||
|
|
||||||
|
label = "APPLY CHANGES"
|
||||||
|
self.nextrely = current_position
|
||||||
|
self.done = self.add_widget_intelligent(
|
||||||
|
npyscreen.ButtonPress,
|
||||||
|
name=label,
|
||||||
|
relx=window_width - len(label) - 15,
|
||||||
|
when_pressed_function=self.on_done,
|
||||||
|
)
|
||||||
|
|
||||||
|
# This restores the selected page on return from an installation
|
||||||
|
for _i in range(1, self.current_tab + 1):
|
||||||
|
self.tabs.h_cursor_line_down(1)
|
||||||
|
self._toggle_tables([self.current_tab])
|
||||||
|
|
||||||
|
############# diffusers tab ##########
|
||||||
|
def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
|
||||||
|
"""Add widgets responsible for selecting diffusers models"""
|
||||||
|
widgets: Dict[str, npyscreen.widget] = {}
|
||||||
|
|
||||||
|
all_models = self.all_models # master dict of all models, indexed by key
|
||||||
|
model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]]
|
||||||
|
model_labels = [self.model_labels[x] for x in model_list]
|
||||||
|
|
||||||
|
widgets.update(
|
||||||
|
label1=self.add_widget_intelligent(
|
||||||
|
CenteredTitleText,
|
||||||
|
name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.",
|
||||||
|
editable=False,
|
||||||
|
labelColor="CAUTION",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.nextrely -= 1
|
||||||
|
# if user has already installed some initial models, then don't patronize them
|
||||||
|
# by showing more recommendations
|
||||||
|
show_recommended = len(self.installed_models) == 0
|
||||||
|
|
||||||
|
checked = [
|
||||||
|
model_list.index(x)
|
||||||
|
for x in model_list
|
||||||
|
if (show_recommended and all_models[x].recommended) or all_models[x].installed
|
||||||
|
]
|
||||||
|
widgets.update(
|
||||||
|
models_selected=self.add_widget_intelligent(
|
||||||
|
MultiSelectColumns,
|
||||||
|
columns=1,
|
||||||
|
name="Install Starter Models",
|
||||||
|
values=model_labels,
|
||||||
|
value=checked,
|
||||||
|
max_height=len(model_list) + 1,
|
||||||
|
relx=4,
|
||||||
|
scroll_exit=True,
|
||||||
|
),
|
||||||
|
models=model_list,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.nextrely += 1
|
||||||
|
return widgets
|
||||||
|
|
||||||
|
############# Add a set of model install widgets ########
|
||||||
|
def add_model_widgets(
    self,
    model_type: ModelType,
    window_width: int = 120,
    install_prompt: Optional[str] = None,
    exclude: Optional[Set[str]] = None,
) -> dict[str, npyscreen.widget]:
    """Generic code to create model selection widgets.

    Args:
        model_type: category of models to list on this tab.
        window_width: available width in characters, used to compute columns.
        install_prompt: optional custom heading; a default is derived from model_type.
        exclude: model keys to omit from the list.

    Returns:
        Dict of created widgets plus the parallel key list under "models".
    """
    if exclude is None:
        exclude = set()
    widgets: Dict[str, npyscreen.widget] = {}
    all_models = self.all_models
    # Models of the requested type, sorted by display name.
    model_list = sorted(
        [x for x in all_models if all_models[x].type == model_type and x not in exclude],
        key=lambda x: all_models[x].name or "",
    )
    model_labels = [self.model_labels[x] for x in model_list]

    show_recommended = len(self.installed_models) == 0
    truncated = False
    if len(model_list) > 0:
        max_width = max([len(x) for x in model_labels])
        columns = window_width // (max_width + 8)  # 8 characters for "[x] " and padding
        # At least one column even when a single label is wider than the window.
        columns = min(len(model_list), columns) or 1
        prompt = (
            install_prompt
            or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
        )

        widgets.update(
            label1=self.add_widget_intelligent(
                CenteredTitleText,
                name=prompt,
                editable=False,
                labelColor="CAUTION",
            )
        )

        # Cap the number of rows shown; a warning widget is added below when capped.
        if len(model_labels) > MAX_OTHER_MODELS:
            model_labels = model_labels[0:MAX_OTHER_MODELS]
            truncated = True

        widgets.update(
            models_selected=self.add_widget_intelligent(
                MultiSelectColumns,
                columns=columns,
                name=f"Install {model_type} Models",
                values=model_labels,
                # Pre-check recommended models (fresh installs) and installed ones.
                value=[
                    model_list.index(x)
                    for x in model_list
                    if (show_recommended and all_models[x].recommended) or all_models[x].installed
                ],
                max_height=len(model_list) // columns + 1,
                relx=4,
                scroll_exit=True,
            ),
            models=model_list,
        )

        if truncated:
            widgets.update(
                warning_message=self.add_widget_intelligent(
                    npyscreen.FixedText,
                    value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.",
                    editable=False,
                    color="CAUTION",
                )
            )

    # Free-form entry of extra sources is always offered, even when no models exist yet.
    self.nextrely += 1
    widgets.update(
        download_ids=self.add_widget_intelligent(
            TextBox,
            name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
            max_height=6,
            scroll_exit=True,
            editable=True,
        )
    )
    return widgets
|
||||||
|
|
||||||
|
### Tab for arbitrary diffusers widgets ###
|
||||||
|
def add_pipeline_widgets(
    self,
    model_type: ModelType = ModelType.Main,
    window_width: int = 120,
    **kwargs,
) -> dict[str, npyscreen.widget]:
    """Build the widget set for the arbitrary-pipelines tab.

    Delegates to add_model_widgets() with a tab-specific install prompt;
    any extra keyword arguments are forwarded unchanged.
    """
    prompt = (
        f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. Enter URLs, paths or repo_ids to import."
    )
    return self.add_model_widgets(
        model_type=model_type,
        window_width=window_width,
        install_prompt=prompt,
        **kwargs,
    )
|
||||||
|
|
||||||
|
def resize(self) -> None:
    """Refresh the starter-model labels after the terminal is resized."""
    super().resize()
    selector = self.starter_pipelines.get("models_selected")
    model_list = self.starter_pipelines.get("models")
    if selector and model_list:
        selector.values = [self.model_labels[key] for key in model_list]
|
||||||
|
|
||||||
|
def _toggle_tables(self, value: List[int]) -> None:
    """Show the widget group for the selected tab and hide all the others.

    Args:
        value: single-element list whose first item is the selected tab index
               (the shape npyscreen delivers for selection callbacks).
    """
    selected_tab = value[0]
    # One widget-dict per tab, in tab order.
    widgets = [
        self.starter_pipelines,
        self.pipeline_models,
        self.controlnet_models,
        self.t2i_models,
        self.ipadapter_models,
        self.lora_models,
        self.ti_models,
    ]

    # Hide and disable everything first...
    for group in widgets:
        for _k, v in group.items():
            try:
                v.hidden = True
                v.editable = False
            except Exception:
                # Not every entry is a widget (e.g. the "models" key list); skip those.
                pass
    # ...then reveal the selected tab's widgets.
    for _k, v in widgets[selected_tab].items():
        try:
            v.hidden = False
            # Static label widgets stay non-editable.
            if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
                v.editable = True
        except Exception:
            pass
    self.__class__.current_tab = selected_tab  # for persistence
    self.display()
|
||||||
|
|
||||||
|
def _get_model_labels(self) -> dict[str, str]:
    """Return a dict mapping each model key to a one-line label trimmed to the terminal width.

    The label is the model name left-padded to a common column width, followed
    by the description truncated with "..." when it would overflow the window.
    """
    window_width, window_height = get_terminal_size()
    checkbox_width = 4
    spacing_width = 2
    result: dict[str, str] = {}

    models = self.all_models
    # FIX: max() over an empty starter-model list raised ValueError; default=0 makes
    # label generation safe when no starter models are defined.
    label_width = max((len(models[x].name or "") for x in self.starter_models), default=0)
    description_width = window_width - label_width - checkbox_width - spacing_width

    for key in self.all_models:
        # Normalize a missing description to the empty string, then truncate
        # with an ellipsis when it would not fit on one row.
        description = models[key].description or ""
        if len(description) > description_width:
            description = description[0 : description_width - 3] + "..."
        result[key] = f"%-{label_width}s %s" % (models[key].name, description)

    return result
|
||||||
|
|
||||||
|
def _get_columns(self) -> int:
    """Choose a column count for the installed-model grid from the terminal width."""
    window_width, _window_height = get_terminal_size()
    if window_width > 240:
        cols = 4
    elif window_width > 160:
        cols = 3
    elif window_width > 80:
        cols = 2
    else:
        cols = 1
    # Never use more columns than there are installed models.
    return min(cols, len(self.installed_models))
|
||||||
|
|
||||||
|
def confirm_deletions(self, selections: InstallSelections) -> bool:
    """Ask the user to confirm removal of unchecked models.

    Returns True when there is nothing to delete or the user accepts the
    confirmation dialog; False when the user cancels.
    """
    if not selections.remove_models:
        return True
    names = [self.all_models[key].name or "" for key in selections.remove_models]
    mods = "\n".join(names)
    confirmed = npyscreen.notify_ok_cancel(
        f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
    )
    # npyscreen carries no return-type annotations; pin the type for mypy.
    assert isinstance(confirmed, bool)
    return confirmed
|
||||||
|
|
||||||
|
@property
def all_models(self) -> Dict[str, UnifiedModelInfo]:
    """Master dict of every known model, keyed by model key (delegates to the install helper)."""
    # npyscreen doesn't have typing hints
    return self.parentApp.install_helper.all_models  # type: ignore

@property
def starter_models(self) -> List[str]:
    """Keys of the curated starter models (delegates to the install helper)."""
    return self.parentApp.install_helper._starter_models  # type: ignore

@property
def installed_models(self) -> List[str]:
    """Keys of the models currently installed on disk (delegates to the install helper)."""
    return self.parentApp.install_helper._installed_models  # type: ignore
|
||||||
|
|
||||||
|
def on_back(self) -> None:
    """Return to the previous form without applying any changes."""
    self.parentApp.switchFormPrevious()
    self.editing = False

def on_cancel(self) -> None:
    """Exit the application, flagging that the user cancelled."""
    self.parentApp.setNextForm(None)
    self.parentApp.user_cancelled = True
    self.editing = False

def on_done(self) -> None:
    """Collect selections and exit, unless the user balks at pending deletions."""
    self.marshall_arguments()
    # Abort (stay on the form) if the user declines the deletion confirmation.
    if not self.confirm_deletions(self.parentApp.install_selections):
        return
    self.parentApp.setNextForm(None)
    self.parentApp.user_cancelled = False
    self.editing = False
|
||||||
|
|
||||||
|
def marshall_arguments(self) -> None:
    """
    Assemble arguments and store as attributes of the application:
    .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
                     True  => Install
                     False => Remove
    .scan_directory: Path to a directory of models to scan and import
    .autoscan_on_startup:  True if invokeai should scan and import at startup time
    .import_model_paths:   list of URLs, repo_ids and file paths to import
    """
    selections = self.parentApp.install_selections
    all_models = self.all_models

    # Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove
    ui_sections = [
        self.starter_pipelines,
        self.pipeline_models,
        self.controlnet_models,
        self.t2i_models,
        self.ipadapter_models,
        self.lora_models,
        self.ti_models,
    ]
    for section in ui_sections:
        if "models_selected" not in section:
            continue
        # Map checked row indices back to model keys for this section.
        selected = {section["models"][x] for x in section["models_selected"].value}
        # Checked but not installed => install; unchecked but installed => remove.
        models_to_install = [x for x in selected if not self.all_models[x].installed]
        models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
        selections.remove_models.extend(models_to_remove)
        selections.install_models.extend([all_models[x] for x in models_to_install])

    # models located in the 'download_ids" section
    for section in ui_sections:
        if downloads := section.get("download_ids"):
            # Whitespace-separated URLs / repo_ids / paths typed by the user.
            models = [UnifiedModelInfo(source=x) for x in downloads.value.split()]
            selections.install_models.extend(models)
|
||||||
|
|
||||||
|
|
||||||
|
class AddModelApplication(npyscreen.NPSAppManaged):  # type: ignore
    """npyscreen application hosting the model-install form.

    Accumulates the user's install/remove choices in `install_selections`
    and records whether the session was cancelled in `user_cancelled`.
    """

    def __init__(self, opt: Namespace, install_helper: InstallHelper):
        super().__init__()
        self.program_opts = opt                      # parsed CLI options
        self.user_cancelled = False                  # set True by the form's cancel handler
        self.install_selections = InstallSelections()  # filled in by marshall_arguments()
        self.install_helper = install_helper

    def onStart(self) -> None:
        """Register the single MAIN form (npyscreen lifecycle hook)."""
        npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
        self.main_form = self.addForm(
            "MAIN",
            addModelsForm,
            name="Install Stable Diffusion Models",
            cycle_widgets=False,
        )
|
||||||
|
|
||||||
|
|
||||||
|
def list_models(installer: ModelInstallService, model_type: ModelType):
    """Print one line per installed model of the given type: name, base, resolved path."""
    records = installer.record_store.search_by_attr(model_type=model_type)
    print(f"Installed models of type `{model_type}`:")
    for record in records:
        resolved_path = (config.models_path / record.path).resolve()
        print(f"{record.name:40}{record.base.value:14}{resolved_path}")
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------
|
||||||
|
def select_and_download_models(opt: Namespace) -> None:
    """Prompt user for install/delete selections and execute.

    Dispatches on the parsed CLI options: list models, apply explicit
    --add/--delete lists, install the default or recommended set, or —
    when nothing non-interactive was requested — launch the TUI.
    """
    precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
    # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal
    config.precision = precision  # type: ignore
    install_helper = InstallHelper(config, logger)
    installer = install_helper.installer

    if opt.list_models:
        list_models(installer, opt.list_models)

    elif opt.add or opt.delete:
        # Non-interactive add/remove from the command line.
        selections = InstallSelections(
            install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
        )
        install_helper.add_or_delete(selections)

    elif opt.default_only:
        selections = InstallSelections(install_models=[install_helper.default_model()])
        install_helper.add_or_delete(selections)

    elif opt.yes_to_all:
        selections = InstallSelections(install_models=install_helper.recommended_models())
        install_helper.add_or_delete(selections)

    # this is where the TUI is called
    else:
        if not set_min_terminal_size(MIN_COLS, MIN_LINES):
            raise WindowTooSmallException(
                "Could not increase terminal size. Try running again with a larger window or smaller font size."
            )

        installApp = AddModelApplication(opt, install_helper)
        try:
            installApp.run()
        except KeyboardInterrupt:
            print("Aborted...")
            sys.exit(-1)

        # Apply whatever the user selected in the form.
        install_helper.add_or_delete(installApp.install_selections)
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------
|
||||||
|
def main() -> None:
    """CLI entry point for the InvokeAI model downloader.

    Parses arguments, initializes the app config and logger, bootstraps an
    unconfigured root directory, then runs select_and_download_models() with
    a ladder of curses/npyscreen-aware exception handlers.
    """
    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
    parser.add_argument(
        "--add",
        nargs="*",
        help="List of URLs, local paths or repo_ids of models to install",
    )
    parser.add_argument(
        "--delete",
        nargs="*",
        help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
    )
    parser.add_argument(
        "--full-precision",
        dest="full_precision",
        action=argparse.BooleanOptionalAction,
        type=bool,
        default=False,
        help="use 32-bit weights instead of faster 16-bit weights",
    )
    parser.add_argument(
        "--yes",
        "-y",
        dest="yes_to_all",
        action="store_true",
        help='answer "yes" to all prompts',
    )
    parser.add_argument(
        "--default_only",
        action="store_true",
        help="Only install the default model",
    )
    parser.add_argument(
        "--list-models",
        choices=[x.value for x in ModelType],
        help="list installed models",
    )
    parser.add_argument(
        "--root_dir",
        dest="root",
        type=str,
        default=None,
        help="path to root of install directory",
    )
    opt = parser.parse_args()

    # Forward relevant options to the InvokeAI config parser.
    invoke_args = []
    if opt.root:
        invoke_args.extend(["--root", opt.root])
    if opt.full_precision:
        invoke_args.extend(["--precision", "float32"])
    config.parse_args(invoke_args)
    # NOTE: this local `logger` shadows any module-level logger for the rest of main().
    logger = InvokeAILogger().get_logger(config=config)

    if not config.model_conf_path.exists():
        # Root directory has never been configured; run the configurator and exit.
        logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.")
        from invokeai.frontend.install.invokeai_configure import invokeai_configure

        invokeai_configure()
        sys.exit(0)

    try:
        select_and_download_models(opt)
    except AssertionError as e:
        logger.error(e)
        sys.exit(-1)
    except KeyboardInterrupt:
        # Restore the terminal state that curses may have left modified.
        curses.nocbreak()
        curses.echo()
        curses.endwin()
        logger.info("Goodbye! Come back soon.")
    except WindowTooSmallException as e:
        logger.error(str(e))
    except widget.NotEnoughSpaceForWidget as e:
        # npyscreen signals a too-short window with this message prefix.
        if str(e).startswith("Height of 1 allocated"):
            logger.error("Insufficient vertical space for the interface. Please make your window taller and try again")
        input("Press any key to continue...")
    except Exception as e:
        # curses raises addwstr errors when the window is too narrow.
        if str(e).startswith("addwstr"):
            logger.error(
                "Insufficient horizontal space for the interface. Please make your window wider and try again."
            )
        else:
            print(f"An exception has occurred: {str(e)} Details:")
            print(traceback.format_exc(), file=sys.stderr)
        input("Press any key to continue...")
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------
|
||||||
|
# Script entry point: run the downloader when invoked directly.
if __name__ == "__main__":
    main()
|
438
invokeai/frontend/merge/merge_diffusers2.py
Normal file
438
invokeai/frontend/merge/merge_diffusers2.py
Normal file
@ -0,0 +1,438 @@
|
|||||||
|
"""
|
||||||
|
invokeai.frontend.merge exports a single function called merge_diffusion_models().
|
||||||
|
|
||||||
|
It merges 2-3 models together and create a new InvokeAI-registered diffusion model.
|
||||||
|
|
||||||
|
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import curses
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from argparse import Namespace
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional, Tuple
|
||||||
|
|
||||||
|
import npyscreen
|
||||||
|
from npyscreen import widget
|
||||||
|
|
||||||
|
import invokeai.backend.util.logging as logger
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.model_install import ModelInstallServiceBase
|
||||||
|
from invokeai.app.services.model_records import ModelRecordServiceBase
|
||||||
|
from invokeai.backend.install.install_helper import initialize_installer
|
||||||
|
from invokeai.backend.model_manager import (
|
||||||
|
BaseModelType,
|
||||||
|
ModelFormat,
|
||||||
|
ModelType,
|
||||||
|
ModelVariantType,
|
||||||
|
)
|
||||||
|
from invokeai.backend.model_manager.merge import ModelMerger
|
||||||
|
from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox
|
||||||
|
|
||||||
|
# Singleton application configuration shared by every function in this module.
config = InvokeAIAppConfig.get_config()

# (base-model enum, human-readable label) pairs used to populate the base-model selector.
BASE_TYPES = [
    (BaseModelType.StableDiffusion1, "Models Built on SD-1.x"),
    (BaseModelType.StableDiffusion2, "Models Built on SD-2.x"),
    (BaseModelType.StableDiffusionXL, "Models Built on SDXL"),
]
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_args() -> Namespace:
    """Parse and return command-line arguments for the model-merge script."""
    parser = argparse.ArgumentParser(description="InvokeAI model merging")
    parser.add_argument(
        "--root_dir",
        type=Path,
        default=config.root,
        help="Path to the invokeai runtime directory",
    )
    parser.add_argument(
        "--front_end",
        "--gui",
        dest="front_end",
        action="store_true",
        default=False,
        help="Activate the text-based graphical front end for collecting parameters. Aside from --root_dir, other parameters will be ignored.",
    )
    parser.add_argument(
        "--models",
        dest="model_names",
        type=str,
        nargs="+",
        help="Two to three model names to be merged",
    )
    parser.add_argument(
        "--base_model",
        type=str,
        choices=[x[0].value for x in BASE_TYPES],
        help="The base model shared by the models to be merged",
    )
    parser.add_argument(
        "--merged_model_name",
        "--destination",
        dest="merged_model_name",
        type=str,
        help="Name of the output model. If not specified, will be the concatenation of the input model names.",
    )
    parser.add_argument(
        "--alpha",
        type=float,
        default=0.5,
        help="The interpolation parameter, ranging from 0 to 1. It affects the ratio in which the checkpoints are merged. Higher values give more weight to the 2d and 3d models",
    )
    parser.add_argument(
        "--interpolation",
        dest="interp",
        type=str,
        choices=["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"],
        default="weighted_sum",
        help='Interpolation method to use. If three models are present, only "add_difference" will work.',
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Try to merge models even if they are incompatible with each other",
    )
    parser.add_argument(
        "--clobber",
        "--overwrite",
        dest="clobber",
        action="store_true",
        help="Overwrite the merged model if --merged_model_name already exists",
    )
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------------- GUI HERE -------------------------
|
||||||
|
class mergeModelsForm(npyscreen.FormMultiPageAction):
    """npyscreen form that collects the parameters for a 2- or 3-way model merge."""

    # Interpolation methods offered for two-model merges; a third model
    # forces "add_difference" (see models_changed()).
    interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid"]

    def __init__(self, parentApp, name):
        self.parentApp = parentApp
        self.ALLOW_RESIZE = True
        self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
        super().__init__(parentApp, name)

    @property
    def model_record_store(self) -> ModelRecordServiceBase:
        """Record store of the parent application's installer."""
        installer: ModelInstallServiceBase = self.parentApp.installer
        return installer.record_store

    def afterEditing(self) -> None:
        # Single-form app: leaving this form ends the application.
        self.parentApp.setNextForm(None)

    def create(self) -> None:
        """Build all widgets; layout is horizontal if three model columns fit."""
        window_height, window_width = curses.initscr().getmaxyx()
        self.current_base = 0
        self.models = self.get_models(BASE_TYPES[self.current_base][0])
        self.model_names = [x[1] for x in self.models]
        max_width = max([len(x) for x in self.model_names])
        max_width += 6
        # Three side-by-side columns only when the window is wide enough.
        horizontal_layout = max_width * 3 < window_width

        self.add_widget_intelligent(
            npyscreen.FixedText,
            color="CONTROL",
            value="Select two models to merge and optionally a third.",
            editable=False,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            color="CONTROL",
            value="Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
            editable=False,
        )
        self.nextrely += 1
        self.base_select = self.add_widget_intelligent(
            SingleSelectColumns,
            values=[x[1] for x in BASE_TYPES],
            value=[self.current_base],
            columns=4,
            max_height=2,
            relx=8,
            scroll_exit=True,
        )
        # Changing the base repopulates all three model selectors.
        self.base_select.on_changed = self._populate_models
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 1",
            color="GOOD",
            editable=False,
            rely=6 if horizontal_layout else None,
        )
        self.model1 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            values=self.model_names,
            value=0,
            max_height=len(self.model_names),
            max_width=max_width,
            scroll_exit=True,
            rely=7,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 2",
            color="GOOD",
            editable=False,
            relx=max_width + 3 if horizontal_layout else None,
            rely=6 if horizontal_layout else None,
        )
        self.model2 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name="(2)",
            values=self.model_names,
            value=1,
            max_height=len(self.model_names),
            max_width=max_width,
            relx=max_width + 3 if horizontal_layout else None,
            rely=7 if horizontal_layout else None,
            scroll_exit=True,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 3",
            color="GOOD",
            editable=False,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=6 if horizontal_layout else None,
        )
        # Model 3 is optional, so its list gets a leading "None" entry.
        models_plus_none = self.model_names.copy()
        models_plus_none.insert(0, "None")
        self.model3 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name="(3)",
            values=models_plus_none,
            value=0,
            max_height=len(self.model_names) + 1,
            max_width=max_width,
            scroll_exit=True,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=7 if horizontal_layout else None,
        )
        for m in [self.model1, self.model2, self.model3]:
            m.when_value_edited = self.models_changed
        self.merged_model_name = self.add_widget_intelligent(
            TextBox,
            name="Name for merged model:",
            labelColor="CONTROL",
            max_height=3,
            value="",
            scroll_exit=True,
        )
        self.force = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Force merge of models created by different diffusers library versions",
            labelColor="CONTROL",
            value=True,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.merge_method = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Merge Method:",
            values=self.interpolations,
            value=0,
            labelColor="CONTROL",
            max_height=len(self.interpolations) + 1,
            scroll_exit=True,
        )
        self.alpha = self.add_widget_intelligent(
            FloatTitleSlider,
            name="Weight (alpha) to assign to second and third models:",
            out_of=1.0,
            step=0.01,
            lowest=0,
            value=0.5,
            labelColor="CONTROL",
            scroll_exit=True,
        )
        self.model1.editing = True

    def models_changed(self) -> None:
        """Refresh the default merged-model name and the legal merge methods."""
        models = self.model1.values
        selected_model1 = self.model1.value[0]
        selected_model2 = self.model2.value[0]
        selected_model3 = self.model3.value[0]
        merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
        self.merged_model_name.value = merged_model_name

        if selected_model3 > 0:
            # Three-way merges support only add_difference.
            self.merge_method.values = ["add_difference ( A+(B-C) )"]
            self.merged_model_name.value += f"+{models[selected_model3 -1]}"  # In model3 there is one more element in the list (None). So we have to subtract one.
        else:
            self.merge_method.values = self.interpolations
            self.merge_method.value = 0

    def on_ok(self) -> None:
        """Validate, confirm overwrite, then hand the collected arguments to the app."""
        if self.validate_field_values() and self.check_for_overwrite():
            self.parentApp.setNextForm(None)
            self.editing = False
            self.parentApp.merge_arguments = self.marshall_arguments()
            npyscreen.notify("Starting the merge...")
        else:
            # Keep the form open so the user can correct the problem.
            self.editing = True

    def on_cancel(self) -> None:
        sys.exit(0)

    def marshall_arguments(self) -> dict:
        """Translate widget state into keyword arguments for merge_diffusion_models_and_save()."""
        model_keys = [x[0] for x in self.models]
        models = [
            model_keys[self.model1.value[0]],
            model_keys[self.model2.value[0]],
        ]
        if self.model3.value[0] > 0:
            # Offset by one to skip the leading "None" entry in model3's list.
            models.append(model_keys[self.model3.value[0] - 1])
            interp = "add_difference"
        else:
            interp = self.interpolations[self.merge_method.value[0]]

        args = {
            "model_keys": models,
            "alpha": self.alpha.value,
            "interp": interp,
            "force": self.force.value,
            "merged_model_name": self.merged_model_name.value,
        }
        return args

    def check_for_overwrite(self) -> bool:
        """Return True unless the destination name exists and the user refuses to overwrite."""
        model_out = self.merged_model_name.value
        if model_out not in self.model_names:
            return True
        else:
            result: bool = npyscreen.notify_yes_no(
                f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
            )
            return result

    def validate_field_values(self) -> bool:
        """Check that two or three distinct models were chosen; notify and return False otherwise."""
        bad_fields = []
        model_names = self.model_names
        # A set collapses duplicate selections, exposing "same model twice".
        selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]}
        if self.model3.value[0] > 0:
            selected_models.add(model_names[self.model3.value[0] - 1])
        if len(selected_models) < 2:
            bad_fields.append(f"Please select two or three DIFFERENT models to compare. You selected {selected_models}")
        if len(bad_fields) > 0:
            message = "The following problems were detected and must be corrected:"
            for problem in bad_fields:
                message += f"\n* {problem}"
            npyscreen.notify_confirm(message)
            return False
        else:
            return True

    def get_models(self, base_model: Optional[BaseModelType] = None) -> List[Tuple[str, str]]:  # key to name
        """Return (key, name) pairs of mergeable main models: diffusers format, "normal" variant."""
        models = [
            (x.key, x.name)
            for x in self.model_record_store.search_by_attr(model_type=ModelType.Main, base_model=base_model)
            if x.format == ModelFormat("diffusers")
            and hasattr(x, "variant")
            and x.variant == ModelVariantType("normal")
        ]
        return sorted(models, key=lambda x: x[1])

    def _populate_models(self, value: List[int]) -> None:
        """Reload the three model selectors when the base-model choice changes."""
        base_model = BASE_TYPES[value[0]][0]
        self.models = self.get_models(base_model)
        self.model_names = [x[1] for x in self.models]

        models_plus_none = self.model_names.copy()
        models_plus_none.insert(0, "None")
        self.model1.values = self.model_names
        self.model2.values = self.model_names
        self.model3.values = models_plus_none

        self.display()
|
||||||
|
|
||||||
|
|
||||||
|
# npyscreen is untyped and causes mypy to get naggy
|
||||||
|
# npyscreen is untyped and causes mypy to get naggy
class Mergeapp(npyscreen.NPSAppManaged):  # type: ignore
    """npyscreen application wrapper around the merge-parameters form."""

    def __init__(self, installer: ModelInstallServiceBase):
        """Initialize the npyscreen application."""
        super().__init__()
        self.installer = installer  # used by the form to query the model record store

    def onStart(self) -> None:
        """Register the single MAIN form (npyscreen lifecycle hook)."""
        npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
        self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")
|
||||||
|
|
||||||
|
|
||||||
|
def run_gui(args: Namespace) -> None:
    """Collect merge parameters interactively via the npyscreen TUI, then perform the merge.

    Args:
        args: parsed CLI namespace (only --root_dir is honored in GUI mode).
    """
    installer = initialize_installer(config)
    mergeapp = Mergeapp(installer)
    mergeapp.run()
    # merge_arguments is the dict produced by mergeModelsForm.marshall_arguments().
    merge_args = mergeapp.merge_arguments
    merger = ModelMerger(installer)
    merger.merge_diffusion_models_and_save(**merge_args)
    # FIX: merge_args is a plain dict, so attribute access (merge_args.merged_model_name)
    # raised AttributeError here; use subscripting instead.
    logger.info(f'Models merged into new model: "{merge_args["merged_model_name"]}".')
|
||||||
|
|
||||||
|
|
||||||
|
def run_cli(args: Namespace) -> None:
|
||||||
|
assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1"
|
||||||
|
assert (
|
||||||
|
args.model_names and len(args.model_names) >= 1 and len(args.model_names) <= 3
|
||||||
|
), "Please provide the --models argument to list 2 to 3 models to merge. Use --help for full usage."
|
||||||
|
|
||||||
|
if not args.merged_model_name:
|
||||||
|
args.merged_model_name = "+".join(args.model_names)
|
||||||
|
logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"')
|
||||||
|
|
||||||
|
installer = initialize_installer(config)
|
||||||
|
store = installer.record_store
|
||||||
|
assert (
|
||||||
|
len(store.search_by_attr(args.merged_model_name, args.base_model, ModelType.Main)) == 0 or args.clobber
|
||||||
|
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
|
||||||
|
|
||||||
|
merger = ModelMerger(installer)
|
||||||
|
model_keys = []
|
||||||
|
for name in args.model_names:
|
||||||
|
if len(name) == 32 and re.match(r"^[0-9a-f]$", name):
|
||||||
|
model_keys.append(name)
|
||||||
|
else:
|
||||||
|
models = store.search_by_attr(
|
||||||
|
model_name=name, model_type=ModelType.Main, base_model=BaseModelType(args.base_model)
|
||||||
|
)
|
||||||
|
assert len(models) > 0, f"{name}: Unknown model"
|
||||||
|
assert len(models) < 2, f"{name}: More than one model by this name. Please specify the model key instead."
|
||||||
|
model_keys.append(models[0].key)
|
||||||
|
|
||||||
|
merger.merge_diffusion_models_and_save(
|
||||||
|
alpha=args.alpha,
|
||||||
|
model_keys=model_keys,
|
||||||
|
merged_model_name=args.merged_model_name,
|
||||||
|
interp=args.interp,
|
||||||
|
force=args.force,
|
||||||
|
)
|
||||||
|
logger.info(f'Models merged into new model: "{args.merged_model_name}".')
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
args = _parse_args()
|
||||||
|
if args.root_dir:
|
||||||
|
config.parse_args(["--root", str(args.root_dir)])
|
||||||
|
else:
|
||||||
|
config.parse_args([])
|
||||||
|
|
||||||
|
try:
|
||||||
|
if args.front_end:
|
||||||
|
run_gui(args)
|
||||||
|
else:
|
||||||
|
run_cli(args)
|
||||||
|
except widget.NotEnoughSpaceForWidget as e:
|
||||||
|
if str(e).startswith("Height of 1 allocated"):
|
||||||
|
logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge")
|
||||||
|
else:
|
||||||
|
logger.error("Not enough room for the user interface. Try making this window larger.")
|
||||||
|
sys.exit(-1)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(str(e))
|
||||||
|
sys.exit(-1)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
sys.exit(-1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
@ -3,7 +3,7 @@
|
|||||||
"""
|
"""
|
||||||
This is the frontend to "textual_inversion_training.py".
|
This is the frontend to "textual_inversion_training.py".
|
||||||
|
|
||||||
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
|
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
@ -14,7 +14,7 @@ import sys
|
|||||||
import traceback
|
import traceback
|
||||||
from argparse import Namespace
|
from argparse import Namespace
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Tuple
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
|
||||||
import npyscreen
|
import npyscreen
|
||||||
from npyscreen import widget
|
from npyscreen import widget
|
||||||
@ -22,8 +22,9 @@ from omegaconf import OmegaConf
|
|||||||
|
|
||||||
import invokeai.backend.util.logging as logger
|
import invokeai.backend.util.logging as logger
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.backend.install.install_helper import initialize_installer
|
||||||
from ...backend.training import do_textual_inversion_training, parse_args
|
from invokeai.backend.model_manager import ModelType
|
||||||
|
from invokeai.backend.training import do_textual_inversion_training, parse_args
|
||||||
|
|
||||||
TRAINING_DATA = "text-inversion-training-data"
|
TRAINING_DATA = "text-inversion-training-data"
|
||||||
TRAINING_DIR = "text-inversion-output"
|
TRAINING_DIR = "text-inversion-output"
|
||||||
@ -44,19 +45,21 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
|
|||||||
precisions = ["no", "fp16", "bf16"]
|
precisions = ["no", "fp16", "bf16"]
|
||||||
learnable_properties = ["object", "style"]
|
learnable_properties = ["object", "style"]
|
||||||
|
|
||||||
def __init__(self, parentApp, name, saved_args=None):
|
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
|
||||||
self.saved_args = saved_args or {}
|
self.saved_args = saved_args or {}
|
||||||
super().__init__(parentApp, name)
|
super().__init__(parentApp, name)
|
||||||
|
|
||||||
def afterEditing(self):
|
def afterEditing(self) -> None:
|
||||||
self.parentApp.setNextForm(None)
|
self.parentApp.setNextForm(None)
|
||||||
|
|
||||||
def create(self):
|
def create(self) -> None:
|
||||||
self.model_names, default = self.get_model_names()
|
self.model_names, default = self.get_model_names()
|
||||||
default_initializer_token = "★"
|
default_initializer_token = "★"
|
||||||
default_placeholder_token = ""
|
default_placeholder_token = ""
|
||||||
saved_args = self.saved_args
|
saved_args = self.saved_args
|
||||||
|
|
||||||
|
assert config is not None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
default = self.model_names.index(saved_args["model"])
|
default = self.model_names.index(saved_args["model"])
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -71,7 +74,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
|
|||||||
self.model = self.add_widget_intelligent(
|
self.model = self.add_widget_intelligent(
|
||||||
npyscreen.TitleSelectOne,
|
npyscreen.TitleSelectOne,
|
||||||
name="Model Name:",
|
name="Model Name:",
|
||||||
values=self.model_names,
|
values=sorted(self.model_names),
|
||||||
value=default,
|
value=default,
|
||||||
max_height=len(self.model_names) + 1,
|
max_height=len(self.model_names) + 1,
|
||||||
scroll_exit=True,
|
scroll_exit=True,
|
||||||
@ -236,7 +239,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
|
|||||||
)
|
)
|
||||||
self.model.editing = True
|
self.model.editing = True
|
||||||
|
|
||||||
def initializer_changed(self):
|
def initializer_changed(self) -> None:
|
||||||
placeholder = self.placeholder_token.value
|
placeholder = self.placeholder_token.value
|
||||||
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
|
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
|
||||||
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
|
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
|
||||||
@ -275,10 +278,13 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
def get_model_names(self) -> Tuple[List[str], int]:
|
def get_model_names(self) -> Tuple[List[str], int]:
|
||||||
conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
|
global config
|
||||||
model_names = [idx for idx in sorted(conf.keys()) if conf[idx].get("format", None) == "diffusers"]
|
assert config is not None
|
||||||
defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
|
installer = initialize_installer(config)
|
||||||
default = defaults[0] if len(defaults) > 0 else 0
|
store = installer.record_store
|
||||||
|
main_models = store.search_by_attr(model_type=ModelType.Main)
|
||||||
|
model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
|
||||||
|
default = 0
|
||||||
return (model_names, default)
|
return (model_names, default)
|
||||||
|
|
||||||
def marshall_arguments(self) -> dict:
|
def marshall_arguments(self) -> dict:
|
||||||
@ -326,7 +332,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
|
|||||||
|
|
||||||
|
|
||||||
class MyApplication(npyscreen.NPSAppManaged):
|
class MyApplication(npyscreen.NPSAppManaged):
|
||||||
def __init__(self, saved_args=None):
|
def __init__(self, saved_args: Optional[Dict[str, str]] = None):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.ti_arguments = None
|
self.ti_arguments = None
|
||||||
self.saved_args = saved_args
|
self.saved_args = saved_args
|
||||||
@ -341,11 +347,12 @@ class MyApplication(npyscreen.NPSAppManaged):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def copy_to_embeddings_folder(args: dict):
|
def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
|
||||||
"""
|
"""
|
||||||
Copy learned_embeds.bin into the embeddings folder, and offer to
|
Copy learned_embeds.bin into the embeddings folder, and offer to
|
||||||
delete the full model and checkpoints.
|
delete the full model and checkpoints.
|
||||||
"""
|
"""
|
||||||
|
assert config is not None
|
||||||
source = Path(args["output_dir"], "learned_embeds.bin")
|
source = Path(args["output_dir"], "learned_embeds.bin")
|
||||||
dest_dir_name = args["placeholder_token"].strip("<>")
|
dest_dir_name = args["placeholder_token"].strip("<>")
|
||||||
destination = config.root_dir / "embeddings" / dest_dir_name
|
destination = config.root_dir / "embeddings" / dest_dir_name
|
||||||
@ -358,10 +365,11 @@ def copy_to_embeddings_folder(args: dict):
|
|||||||
logger.info(f'Keeping {args["output_dir"]}')
|
logger.info(f'Keeping {args["output_dir"]}')
|
||||||
|
|
||||||
|
|
||||||
def save_args(args: dict):
|
def save_args(args: dict) -> None:
|
||||||
"""
|
"""
|
||||||
Save the current argument values to an omegaconf file
|
Save the current argument values to an omegaconf file
|
||||||
"""
|
"""
|
||||||
|
assert config is not None
|
||||||
dest_dir = config.root_dir / TRAINING_DIR
|
dest_dir = config.root_dir / TRAINING_DIR
|
||||||
os.makedirs(dest_dir, exist_ok=True)
|
os.makedirs(dest_dir, exist_ok=True)
|
||||||
conf_file = dest_dir / CONF_FILE
|
conf_file = dest_dir / CONF_FILE
|
||||||
@ -373,6 +381,7 @@ def previous_args() -> dict:
|
|||||||
"""
|
"""
|
||||||
Get the previous arguments used.
|
Get the previous arguments used.
|
||||||
"""
|
"""
|
||||||
|
assert config is not None
|
||||||
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
|
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
|
||||||
try:
|
try:
|
||||||
conf = OmegaConf.load(conf_file)
|
conf = OmegaConf.load(conf_file)
|
||||||
@ -383,24 +392,26 @@ def previous_args() -> dict:
|
|||||||
return conf
|
return conf
|
||||||
|
|
||||||
|
|
||||||
def do_front_end(args: Namespace):
|
def do_front_end() -> None:
|
||||||
|
global config
|
||||||
saved_args = previous_args()
|
saved_args = previous_args()
|
||||||
myapplication = MyApplication(saved_args=saved_args)
|
myapplication = MyApplication(saved_args=saved_args)
|
||||||
myapplication.run()
|
myapplication.run()
|
||||||
|
|
||||||
if args := myapplication.ti_arguments:
|
if my_args := myapplication.ti_arguments:
|
||||||
os.makedirs(args["output_dir"], exist_ok=True)
|
os.makedirs(my_args["output_dir"], exist_ok=True)
|
||||||
|
|
||||||
# Automatically add angle brackets around the trigger
|
# Automatically add angle brackets around the trigger
|
||||||
if not re.match("^<.+>$", args["placeholder_token"]):
|
if not re.match("^<.+>$", my_args["placeholder_token"]):
|
||||||
args["placeholder_token"] = f"<{args['placeholder_token']}>"
|
my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"
|
||||||
|
|
||||||
args["only_save_embeds"] = True
|
my_args["only_save_embeds"] = True
|
||||||
save_args(args)
|
save_args(my_args)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
do_textual_inversion_training(InvokeAIAppConfig.get_config(), **args)
|
print(my_args)
|
||||||
copy_to_embeddings_folder(args)
|
do_textual_inversion_training(config, **my_args)
|
||||||
|
copy_to_embeddings_folder(my_args)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error("An exception occurred during training. The exception was:")
|
logger.error("An exception occurred during training. The exception was:")
|
||||||
logger.error(str(e))
|
logger.error(str(e))
|
||||||
@ -408,11 +419,12 @@ def do_front_end(args: Namespace):
|
|||||||
logger.error(traceback.format_exc())
|
logger.error(traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main() -> None:
|
||||||
global config
|
global config
|
||||||
|
|
||||||
args = parse_args()
|
args: Namespace = parse_args()
|
||||||
config = InvokeAIAppConfig.get_config()
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
config.parse_args([])
|
||||||
|
|
||||||
# change root if needed
|
# change root if needed
|
||||||
if args.root_dir:
|
if args.root_dir:
|
||||||
@ -420,7 +432,7 @@ def main():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
if args.front_end:
|
if args.front_end:
|
||||||
do_front_end(args)
|
do_front_end()
|
||||||
else:
|
else:
|
||||||
do_textual_inversion_training(config, **vars(args))
|
do_textual_inversion_training(config, **vars(args))
|
||||||
except AssertionError as e:
|
except AssertionError as e:
|
||||||
|
454
invokeai/frontend/training/textual_inversion2.py
Normal file
454
invokeai/frontend/training/textual_inversion2.py
Normal file
@ -0,0 +1,454 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
This is the frontend to "textual_inversion_training.py".
|
||||||
|
|
||||||
|
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
from argparse import Namespace
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import npyscreen
|
||||||
|
from npyscreen import widget
|
||||||
|
from omegaconf import OmegaConf
|
||||||
|
|
||||||
|
import invokeai.backend.util.logging as logger
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.backend.install.install_helper import initialize_installer
|
||||||
|
from invokeai.backend.model_manager import ModelType
|
||||||
|
from invokeai.backend.training import do_textual_inversion_training, parse_args
|
||||||
|
|
||||||
|
TRAINING_DATA = "text-inversion-training-data"
|
||||||
|
TRAINING_DIR = "text-inversion-output"
|
||||||
|
CONF_FILE = "preferences.conf"
|
||||||
|
config = None
|
||||||
|
|
||||||
|
|
||||||
|
class textualInversionForm(npyscreen.FormMultiPageAction):
|
||||||
|
resolutions = [512, 768, 1024]
|
||||||
|
lr_schedulers = [
|
||||||
|
"linear",
|
||||||
|
"cosine",
|
||||||
|
"cosine_with_restarts",
|
||||||
|
"polynomial",
|
||||||
|
"constant",
|
||||||
|
"constant_with_warmup",
|
||||||
|
]
|
||||||
|
precisions = ["no", "fp16", "bf16"]
|
||||||
|
learnable_properties = ["object", "style"]
|
||||||
|
|
||||||
|
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
|
||||||
|
self.saved_args = saved_args or {}
|
||||||
|
super().__init__(parentApp, name)
|
||||||
|
|
||||||
|
def afterEditing(self) -> None:
|
||||||
|
self.parentApp.setNextForm(None)
|
||||||
|
|
||||||
|
def create(self) -> None:
|
||||||
|
self.model_names, default = self.get_model_names()
|
||||||
|
default_initializer_token = "★"
|
||||||
|
default_placeholder_token = ""
|
||||||
|
saved_args = self.saved_args
|
||||||
|
|
||||||
|
assert config is not None
|
||||||
|
|
||||||
|
try:
|
||||||
|
default = self.model_names.index(saved_args["model"])
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.add_widget_intelligent(
|
||||||
|
npyscreen.FixedText,
|
||||||
|
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields, cursor arrows to make a selection, and space to toggle checkboxes.",
|
||||||
|
editable=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.model = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSelectOne,
|
||||||
|
name="Model Name:",
|
||||||
|
values=sorted(self.model_names),
|
||||||
|
value=default,
|
||||||
|
max_height=len(self.model_names) + 1,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.placeholder_token = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleText,
|
||||||
|
name="Trigger Term:",
|
||||||
|
value="", # saved_args.get('placeholder_token',''), # to restore previous term
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.placeholder_token.when_value_edited = self.initializer_changed
|
||||||
|
self.nextrely -= 1
|
||||||
|
self.nextrelx += 30
|
||||||
|
self.prompt_token = self.add_widget_intelligent(
|
||||||
|
npyscreen.FixedText,
|
||||||
|
name="Trigger term for use in prompt",
|
||||||
|
value="",
|
||||||
|
editable=False,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.nextrelx -= 30
|
||||||
|
self.initializer_token = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleText,
|
||||||
|
name="Initializer:",
|
||||||
|
value=saved_args.get("initializer_token", default_initializer_token),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.resume_from_checkpoint = self.add_widget_intelligent(
|
||||||
|
npyscreen.Checkbox,
|
||||||
|
name="Resume from last saved checkpoint",
|
||||||
|
value=False,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.learnable_property = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSelectOne,
|
||||||
|
name="Learnable property:",
|
||||||
|
values=self.learnable_properties,
|
||||||
|
value=self.learnable_properties.index(saved_args.get("learnable_property", "object")),
|
||||||
|
max_height=4,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.train_data_dir = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleFilename,
|
||||||
|
name="Data Training Directory:",
|
||||||
|
select_dir=True,
|
||||||
|
must_exist=False,
|
||||||
|
value=str(
|
||||||
|
saved_args.get(
|
||||||
|
"train_data_dir",
|
||||||
|
config.root_dir / TRAINING_DATA / default_placeholder_token,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.output_dir = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleFilename,
|
||||||
|
name="Output Destination Directory:",
|
||||||
|
select_dir=True,
|
||||||
|
must_exist=False,
|
||||||
|
value=str(
|
||||||
|
saved_args.get(
|
||||||
|
"output_dir",
|
||||||
|
config.root_dir / TRAINING_DIR / default_placeholder_token,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.resolution = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSelectOne,
|
||||||
|
name="Image resolution (pixels):",
|
||||||
|
values=self.resolutions,
|
||||||
|
value=self.resolutions.index(saved_args.get("resolution", 512)),
|
||||||
|
max_height=4,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.center_crop = self.add_widget_intelligent(
|
||||||
|
npyscreen.Checkbox,
|
||||||
|
name="Center crop images before resizing to resolution",
|
||||||
|
value=saved_args.get("center_crop", False),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.mixed_precision = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSelectOne,
|
||||||
|
name="Mixed Precision:",
|
||||||
|
values=self.precisions,
|
||||||
|
value=self.precisions.index(saved_args.get("mixed_precision", "fp16")),
|
||||||
|
max_height=4,
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.num_train_epochs = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSlider,
|
||||||
|
name="Number of training epochs:",
|
||||||
|
out_of=1000,
|
||||||
|
step=50,
|
||||||
|
lowest=1,
|
||||||
|
value=saved_args.get("num_train_epochs", 100),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.max_train_steps = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSlider,
|
||||||
|
name="Max Training Steps:",
|
||||||
|
out_of=10000,
|
||||||
|
step=500,
|
||||||
|
lowest=1,
|
||||||
|
value=saved_args.get("max_train_steps", 3000),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.train_batch_size = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSlider,
|
||||||
|
name="Batch Size (reduce if you run out of memory):",
|
||||||
|
out_of=50,
|
||||||
|
step=1,
|
||||||
|
lowest=1,
|
||||||
|
value=saved_args.get("train_batch_size", 8),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.gradient_accumulation_steps = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSlider,
|
||||||
|
name="Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):",
|
||||||
|
out_of=10,
|
||||||
|
step=1,
|
||||||
|
lowest=1,
|
||||||
|
value=saved_args.get("gradient_accumulation_steps", 4),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.lr_warmup_steps = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSlider,
|
||||||
|
name="Warmup Steps:",
|
||||||
|
out_of=100,
|
||||||
|
step=1,
|
||||||
|
lowest=0,
|
||||||
|
value=saved_args.get("lr_warmup_steps", 0),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.learning_rate = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleText,
|
||||||
|
name="Learning Rate:",
|
||||||
|
value=str(
|
||||||
|
saved_args.get("learning_rate", "5.0e-04"),
|
||||||
|
),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.scale_lr = self.add_widget_intelligent(
|
||||||
|
npyscreen.Checkbox,
|
||||||
|
name="Scale learning rate by number GPUs, steps and batch size",
|
||||||
|
value=saved_args.get("scale_lr", True),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
|
||||||
|
npyscreen.Checkbox,
|
||||||
|
name="Use xformers acceleration",
|
||||||
|
value=saved_args.get("enable_xformers_memory_efficient_attention", False),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.lr_scheduler = self.add_widget_intelligent(
|
||||||
|
npyscreen.TitleSelectOne,
|
||||||
|
name="Learning rate scheduler:",
|
||||||
|
values=self.lr_schedulers,
|
||||||
|
max_height=7,
|
||||||
|
value=self.lr_schedulers.index(saved_args.get("lr_scheduler", "constant")),
|
||||||
|
scroll_exit=True,
|
||||||
|
)
|
||||||
|
self.model.editing = True
|
||||||
|
|
||||||
|
def initializer_changed(self) -> None:
|
||||||
|
placeholder = self.placeholder_token.value
|
||||||
|
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
|
||||||
|
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
|
||||||
|
self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
|
||||||
|
self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()
|
||||||
|
|
||||||
|
def on_ok(self):
|
||||||
|
if self.validate_field_values():
|
||||||
|
self.parentApp.setNextForm(None)
|
||||||
|
self.editing = False
|
||||||
|
self.parentApp.ti_arguments = self.marshall_arguments()
|
||||||
|
npyscreen.notify("Launching textual inversion training. This will take a while...")
|
||||||
|
else:
|
||||||
|
self.editing = True
|
||||||
|
|
||||||
|
def ok_cancel(self):
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
def validate_field_values(self) -> bool:
|
||||||
|
bad_fields = []
|
||||||
|
if self.model.value is None:
|
||||||
|
bad_fields.append("Model Name must correspond to a known model in models.yaml")
|
||||||
|
if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
|
||||||
|
bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
|
||||||
|
if self.train_data_dir.value is None:
|
||||||
|
bad_fields.append("Data Training Directory cannot be empty")
|
||||||
|
if self.output_dir.value is None:
|
||||||
|
bad_fields.append("The Output Destination Directory cannot be empty")
|
||||||
|
if len(bad_fields) > 0:
|
||||||
|
message = "The following problems were detected and must be corrected:"
|
||||||
|
for problem in bad_fields:
|
||||||
|
message += f"\n* {problem}"
|
||||||
|
npyscreen.notify_confirm(message)
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
def get_model_names(self) -> Tuple[List[str], int]:
|
||||||
|
global config
|
||||||
|
assert config is not None
|
||||||
|
installer = initialize_installer(config)
|
||||||
|
store = installer.record_store
|
||||||
|
main_models = store.search_by_attr(model_type=ModelType.Main)
|
||||||
|
model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
|
||||||
|
default = 0
|
||||||
|
return (model_names, default)
|
||||||
|
|
||||||
|
def marshall_arguments(self) -> dict:
|
||||||
|
args = {}
|
||||||
|
|
||||||
|
# the choices
|
||||||
|
args.update(
|
||||||
|
model=self.model_names[self.model.value[0]],
|
||||||
|
resolution=self.resolutions[self.resolution.value[0]],
|
||||||
|
lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]],
|
||||||
|
mixed_precision=self.precisions[self.mixed_precision.value[0]],
|
||||||
|
learnable_property=self.learnable_properties[self.learnable_property.value[0]],
|
||||||
|
)
|
||||||
|
|
||||||
|
# all the strings and booleans
|
||||||
|
for attr in (
|
||||||
|
"initializer_token",
|
||||||
|
"placeholder_token",
|
||||||
|
"train_data_dir",
|
||||||
|
"output_dir",
|
||||||
|
"scale_lr",
|
||||||
|
"center_crop",
|
||||||
|
"enable_xformers_memory_efficient_attention",
|
||||||
|
):
|
||||||
|
args[attr] = getattr(self, attr).value
|
||||||
|
|
||||||
|
# all the integers
|
||||||
|
for attr in (
|
||||||
|
"train_batch_size",
|
||||||
|
"gradient_accumulation_steps",
|
||||||
|
"num_train_epochs",
|
||||||
|
"max_train_steps",
|
||||||
|
"lr_warmup_steps",
|
||||||
|
):
|
||||||
|
args[attr] = int(getattr(self, attr).value)
|
||||||
|
|
||||||
|
# the floats (just one)
|
||||||
|
args.update(learning_rate=float(self.learning_rate.value))
|
||||||
|
|
||||||
|
# a special case
|
||||||
|
if self.resume_from_checkpoint.value and Path(self.output_dir.value).exists():
|
||||||
|
args["resume_from_checkpoint"] = "latest"
|
||||||
|
|
||||||
|
return args
|
||||||
|
|
||||||
|
|
||||||
|
class MyApplication(npyscreen.NPSAppManaged):
|
||||||
|
def __init__(self, saved_args: Optional[Dict[str, str]] = None):
|
||||||
|
super().__init__()
|
||||||
|
self.ti_arguments = None
|
||||||
|
self.saved_args = saved_args
|
||||||
|
|
||||||
|
def onStart(self):
|
||||||
|
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
|
||||||
|
self.main = self.addForm(
|
||||||
|
"MAIN",
|
||||||
|
textualInversionForm,
|
||||||
|
name="Textual Inversion Settings",
|
||||||
|
saved_args=self.saved_args,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
|
||||||
|
"""
|
||||||
|
Copy learned_embeds.bin into the embeddings folder, and offer to
|
||||||
|
delete the full model and checkpoints.
|
||||||
|
"""
|
||||||
|
assert config is not None
|
||||||
|
source = Path(args["output_dir"], "learned_embeds.bin")
|
||||||
|
dest_dir_name = args["placeholder_token"].strip("<>")
|
||||||
|
destination = config.root_dir / "embeddings" / dest_dir_name
|
||||||
|
os.makedirs(destination, exist_ok=True)
|
||||||
|
logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
|
||||||
|
shutil.copy(source, destination)
|
||||||
|
if (input("Delete training logs and intermediate checkpoints? [y] ") or "y").startswith(("y", "Y")):
|
||||||
|
shutil.rmtree(Path(args["output_dir"]))
|
||||||
|
else:
|
||||||
|
logger.info(f'Keeping {args["output_dir"]}')
|
||||||
|
|
||||||
|
|
||||||
|
def save_args(args: dict) -> None:
|
||||||
|
"""
|
||||||
|
Save the current argument values to an omegaconf file
|
||||||
|
"""
|
||||||
|
assert config is not None
|
||||||
|
dest_dir = config.root_dir / TRAINING_DIR
|
||||||
|
os.makedirs(dest_dir, exist_ok=True)
|
||||||
|
conf_file = dest_dir / CONF_FILE
|
||||||
|
conf = OmegaConf.create(args)
|
||||||
|
OmegaConf.save(config=conf, f=conf_file)
|
||||||
|
|
||||||
|
|
||||||
|
def previous_args() -> dict:
|
||||||
|
"""
|
||||||
|
Get the previous arguments used.
|
||||||
|
"""
|
||||||
|
assert config is not None
|
||||||
|
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
|
||||||
|
try:
|
||||||
|
conf = OmegaConf.load(conf_file)
|
||||||
|
conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
|
||||||
|
except Exception:
|
||||||
|
conf = None
|
||||||
|
|
||||||
|
return conf
|
||||||
|
|
||||||
|
|
||||||
|
def do_front_end() -> None:
|
||||||
|
global config
|
||||||
|
saved_args = previous_args()
|
||||||
|
myapplication = MyApplication(saved_args=saved_args)
|
||||||
|
myapplication.run()
|
||||||
|
|
||||||
|
if my_args := myapplication.ti_arguments:
|
||||||
|
os.makedirs(my_args["output_dir"], exist_ok=True)
|
||||||
|
|
||||||
|
# Automatically add angle brackets around the trigger
|
||||||
|
if not re.match("^<.+>$", my_args["placeholder_token"]):
|
||||||
|
my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"
|
||||||
|
|
||||||
|
my_args["only_save_embeds"] = True
|
||||||
|
save_args(my_args)
|
||||||
|
|
||||||
|
try:
|
||||||
|
print(my_args)
|
||||||
|
do_textual_inversion_training(config, **my_args)
|
||||||
|
copy_to_embeddings_folder(my_args)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error("An exception occurred during training. The exception was:")
|
||||||
|
logger.error(str(e))
|
||||||
|
logger.error("DETAILS:")
|
||||||
|
logger.error(traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
global config
|
||||||
|
|
||||||
|
args: Namespace = parse_args()
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
config.parse_args([])
|
||||||
|
|
||||||
|
# change root if needed
|
||||||
|
if args.root_dir:
|
||||||
|
config.root = args.root_dir
|
||||||
|
|
||||||
|
try:
|
||||||
|
if args.front_end:
|
||||||
|
do_front_end()
|
||||||
|
else:
|
||||||
|
do_textual_inversion_training(config, **vars(args))
|
||||||
|
except AssertionError as e:
|
||||||
|
logger.error(e)
|
||||||
|
sys.exit(-1)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
pass
|
||||||
|
except (widget.NotEnoughSpaceForWidget, Exception) as e:
|
||||||
|
if str(e).startswith("Height of 1 allocated"):
|
||||||
|
logger.error("You need to have at least one diffusers models defined in models.yaml in order to train")
|
||||||
|
elif str(e).startswith("addwstr"):
|
||||||
|
logger.error("Not enough window space for the interface. Please make your window larger and try again.")
|
||||||
|
else:
|
||||||
|
logger.error(e)
|
||||||
|
sys.exit(-1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
@ -1,131 +1,26 @@
|
|||||||
module.exports = {
|
module.exports = {
|
||||||
env: {
|
extends: ['@invoke-ai/eslint-config-react'],
|
||||||
browser: true,
|
plugins: ['path', 'i18next'],
|
||||||
es6: true,
|
|
||||||
node: true,
|
|
||||||
},
|
|
||||||
extends: [
|
|
||||||
'eslint:recommended',
|
|
||||||
'plugin:@typescript-eslint/recommended',
|
|
||||||
'plugin:react/recommended',
|
|
||||||
'plugin:react-hooks/recommended',
|
|
||||||
'plugin:react/jsx-runtime',
|
|
||||||
'prettier',
|
|
||||||
'plugin:storybook/recommended',
|
|
||||||
],
|
|
||||||
parser: '@typescript-eslint/parser',
|
|
||||||
parserOptions: {
|
|
||||||
ecmaFeatures: {
|
|
||||||
jsx: true,
|
|
||||||
},
|
|
||||||
ecmaVersion: 2018,
|
|
||||||
sourceType: 'module',
|
|
||||||
},
|
|
||||||
plugins: [
|
|
||||||
'react',
|
|
||||||
'@typescript-eslint',
|
|
||||||
'eslint-plugin-react-hooks',
|
|
||||||
'i18next',
|
|
||||||
'path',
|
|
||||||
'unused-imports',
|
|
||||||
'simple-import-sort',
|
|
||||||
'eslint-plugin-import',
|
|
||||||
// These rules are too strict for normal usage, but are useful for optimizing rerenders
|
|
||||||
// '@arthurgeron/react-usememo',
|
|
||||||
],
|
|
||||||
root: true,
|
|
||||||
rules: {
|
rules: {
|
||||||
|
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
|
||||||
|
'react-refresh/only-export-components': 'off',
|
||||||
|
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
|
||||||
|
'@typescript-eslint/consistent-type-assertions': 'off',
|
||||||
|
// https://github.com/qdanik/eslint-plugin-path
|
||||||
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
||||||
curly: 'error',
|
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
|
||||||
'i18next/no-literal-string': 'warn',
|
'i18next/no-literal-string': 'error',
|
||||||
'react/jsx-no-bind': ['error', { allowBind: true }],
|
|
||||||
'react/jsx-curly-brace-presence': [
|
|
||||||
'error',
|
|
||||||
{ props: 'never', children: 'never' },
|
|
||||||
],
|
|
||||||
'react-hooks/exhaustive-deps': 'error',
|
|
||||||
'no-var': 'error',
|
|
||||||
'brace-style': 'error',
|
|
||||||
'prefer-template': 'error',
|
|
||||||
'import/no-duplicates': 'error',
|
|
||||||
radix: 'error',
|
|
||||||
'space-before-blocks': 'error',
|
|
||||||
'import/prefer-default-export': 'off',
|
|
||||||
'@typescript-eslint/no-unused-vars': 'off',
|
|
||||||
'unused-imports/no-unused-imports': 'error',
|
|
||||||
'unused-imports/no-unused-vars': [
|
|
||||||
'warn',
|
|
||||||
{
|
|
||||||
vars: 'all',
|
|
||||||
varsIgnorePattern: '^_',
|
|
||||||
args: 'after-used',
|
|
||||||
argsIgnorePattern: '^_',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
// These rules are too strict for normal usage, but are useful for optimizing rerenders
|
|
||||||
// '@arthurgeron/react-usememo/require-usememo': [
|
|
||||||
// 'warn',
|
|
||||||
// {
|
|
||||||
// strict: false,
|
|
||||||
// checkHookReturnObject: false,
|
|
||||||
// fix: { addImports: true },
|
|
||||||
// checkHookCalls: false,
|
|
||||||
|
|
||||||
// },
|
|
||||||
// ],
|
|
||||||
// '@arthurgeron/react-usememo/require-memo': 'warn',
|
|
||||||
'@typescript-eslint/ban-ts-comment': 'warn',
|
|
||||||
'@typescript-eslint/no-explicit-any': 'warn',
|
|
||||||
'@typescript-eslint/no-empty-interface': [
|
|
||||||
'error',
|
|
||||||
{
|
|
||||||
allowSingleExtends: true,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
'@typescript-eslint/consistent-type-imports': [
|
|
||||||
'error',
|
|
||||||
{
|
|
||||||
prefer: 'type-imports',
|
|
||||||
fixStyle: 'separate-type-imports',
|
|
||||||
disallowTypeAnnotations: true,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
'@typescript-eslint/no-import-type-side-effects': 'error',
|
|
||||||
'simple-import-sort/imports': 'error',
|
|
||||||
'simple-import-sort/exports': 'error',
|
|
||||||
// Prefer @invoke-ai/ui components over chakra
|
|
||||||
'no-restricted-imports': 'off',
|
|
||||||
'@typescript-eslint/no-restricted-imports': [
|
|
||||||
'warn',
|
|
||||||
{
|
|
||||||
paths: [
|
|
||||||
{
|
|
||||||
name: '@chakra-ui/react',
|
|
||||||
message: "Please import from '@invoke-ai/ui' instead.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: '@chakra-ui/layout',
|
|
||||||
message: "Please import from '@invoke-ai/ui' instead.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: '@chakra-ui/portal',
|
|
||||||
message: "Please import from '@invoke-ai/ui' instead.",
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
overrides: [
|
overrides: [
|
||||||
|
/**
|
||||||
|
* Overrides for stories
|
||||||
|
*/
|
||||||
{
|
{
|
||||||
files: ['*.stories.tsx'],
|
files: ['*.stories.tsx'],
|
||||||
rules: {
|
rules: {
|
||||||
|
// We may not have i18n available in stories.
|
||||||
'i18next/no-literal-string': 'off',
|
'i18next/no-literal-string': 'off',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
settings: {
|
|
||||||
react: {
|
|
||||||
version: 'detect',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
@ -1,9 +1,5 @@
|
|||||||
module.exports = {
|
module.exports = {
|
||||||
trailingComma: 'es5',
|
...require('@invoke-ai/prettier-config-react'),
|
||||||
tabWidth: 2,
|
|
||||||
semi: true,
|
|
||||||
singleQuote: true,
|
|
||||||
endOfLine: 'auto',
|
|
||||||
overrides: [
|
overrides: [
|
||||||
{
|
{
|
||||||
files: ['public/locales/*.json'],
|
files: ['public/locales/*.json'],
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
import { PropsWithChildren, memo, useEffect } from 'react';
|
import { PropsWithChildren, memo, useEffect } from 'react';
|
||||||
import { modelChanged } from '../src/features/parameters/store/generationSlice';
|
import { modelChanged } from '../src/features/parameters/store/generationSlice';
|
||||||
import { useAppDispatch } from '../src/app/store/storeHooks';
|
import { useAppDispatch } from '../src/app/store/storeHooks';
|
||||||
import { useGlobalModifiersInit } from '@invoke-ai/ui';
|
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
|
||||||
/**
|
/**
|
||||||
* Initializes some state for storybook. Must be in a different component
|
* Initializes some state for storybook. Must be in a different component
|
||||||
* so that it is run inside the redux context.
|
* so that it is run inside the redux context.
|
||||||
|
@ -6,7 +6,6 @@ import { Provider } from 'react-redux';
|
|||||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||||
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||||
import { createStore } from '../src/app/store/store';
|
import { createStore } from '../src/app/store/store';
|
||||||
import { Container } from '@chakra-ui/react';
|
|
||||||
// TODO: Disabled for IDE performance issues with our translation JSON
|
// TODO: Disabled for IDE performance issues with our translation JSON
|
||||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||||
// @ts-ignore
|
// @ts-ignore
|
||||||
|
@ -1,13 +1,7 @@
|
|||||||
{
|
{
|
||||||
"entry": ["src/main.tsx"],
|
"entry": ["src/main.tsx"],
|
||||||
"extensions": [".ts", ".tsx"],
|
"extensions": [".ts", ".tsx"],
|
||||||
"ignorePatterns": [
|
"ignorePatterns": ["**/node_modules/**", "dist/**", "public/**", "**/*.stories.tsx", "config/**"],
|
||||||
"**/node_modules/**",
|
|
||||||
"dist/**",
|
|
||||||
"public/**",
|
|
||||||
"**/*.stories.tsx",
|
|
||||||
"config/**"
|
|
||||||
],
|
|
||||||
"ignoreUnresolved": [],
|
"ignoreUnresolved": [],
|
||||||
"ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
|
"ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
|
||||||
"respectGitignore": true,
|
"respectGitignore": true,
|
||||||
|
150
invokeai/frontend/web/README.md
Normal file
150
invokeai/frontend/web/README.md
Normal file
@ -0,0 +1,150 @@
|
|||||||
|
# Invoke UI
|
||||||
|
|
||||||
|
<!-- @import "[TOC]" {cmd="toc" depthFrom=2 depthTo=3 orderedList=false} -->
|
||||||
|
|
||||||
|
<!-- code_chunk_output -->
|
||||||
|
|
||||||
|
- [Dev environment](#dev-environment)
|
||||||
|
- [Setup](#setup)
|
||||||
|
- [Package scripts](#package-scripts)
|
||||||
|
- [Type generation](#type-generation)
|
||||||
|
- [Localization](#localization)
|
||||||
|
- [VSCode](#vscode)
|
||||||
|
- [Contributing](#contributing)
|
||||||
|
- [Check in before investing your time](#check-in-before-investing-your-time)
|
||||||
|
- [Commit format](#commit-format)
|
||||||
|
- [Submitting a PR](#submitting-a-pr)
|
||||||
|
- [Other docs](#other-docs)
|
||||||
|
|
||||||
|
<!-- /code_chunk_output -->
|
||||||
|
|
||||||
|
Invoke's UI is made possible by many contributors and open-source libraries. Thank you!
|
||||||
|
|
||||||
|
## Dev environment
|
||||||
|
|
||||||
|
### Setup
|
||||||
|
|
||||||
|
1. Install [node] and [pnpm].
|
||||||
|
1. Run `pnpm i` to install all packages.
|
||||||
|
|
||||||
|
#### Run in dev mode
|
||||||
|
|
||||||
|
1. From `invokeai/frontend/web/`, run `pnpm dev`.
|
||||||
|
1. From repo root, run `python scripts/invokeai-web.py`.
|
||||||
|
1. Point your browser to the dev server address, e.g. <http://localhost:5173/>
|
||||||
|
|
||||||
|
### Package scripts
|
||||||
|
|
||||||
|
- `dev`: run the frontend in dev mode, enabling hot reloading
|
||||||
|
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
|
||||||
|
- `typegen`: generate types from the OpenAPI schema (see [Type generation])
|
||||||
|
- `lint:madge`: check frontend for circular dependencies
|
||||||
|
- `lint:eslint`: check frontend for code quality
|
||||||
|
- `lint:prettier`: check frontend for code formatting
|
||||||
|
- `lint:tsc`: check frontend for type issues
|
||||||
|
- `lint`: run all checks concurrently
|
||||||
|
- `fix`: run `eslint` and `prettier`, fixing fixable issues
|
||||||
|
|
||||||
|
### Type generation
|
||||||
|
|
||||||
|
We use [openapi-typescript] to generate types from the app's OpenAPI schema.
|
||||||
|
|
||||||
|
The generated types are committed to the repo in [schema.ts].
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# from the repo root, start the server
|
||||||
|
python scripts/invokeai-web.py
|
||||||
|
# from invokeai/frontend/web/, run the script
|
||||||
|
pnpm typegen
|
||||||
|
```
|
||||||
|
|
||||||
|
### Localization
|
||||||
|
|
||||||
|
We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project.
|
||||||
|
|
||||||
|
Only the English source strings should be changed on this repo.
|
||||||
|
|
||||||
|
### VSCode
|
||||||
|
|
||||||
|
#### Example debugger config
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"type": "chrome",
|
||||||
|
"request": "launch",
|
||||||
|
"name": "Invoke UI",
|
||||||
|
"url": "http://localhost:5173",
|
||||||
|
"webRoot": "${workspaceFolder}/invokeai/frontend/web",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Remote dev
|
||||||
|
|
||||||
|
We've noticed an intermittent timeout issue with the VSCode remote dev port forwarding.
|
||||||
|
|
||||||
|
We suggest disabling the editor's port forwarding feature and doing it manually via SSH:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contributing Guidelines
|
||||||
|
|
||||||
|
Thanks for your interest in contributing to the Invoke Web UI!
|
||||||
|
|
||||||
|
Please follow these guidelines when contributing.
|
||||||
|
|
||||||
|
### Check in before investing your time
|
||||||
|
|
||||||
|
Please check in before you invest your time on anything besides a trivial fix, in case it conflicts with ongoing work or isn't aligned with the vision for the app.
|
||||||
|
|
||||||
|
If a feature request or issue doesn't already exist for the thing you want to work on, please create one.
|
||||||
|
|
||||||
|
Ping `@psychedelicious` on [discord] in the `#frontend-dev` channel or in the feature request / issue you want to work on - we're happy chat.
|
||||||
|
|
||||||
|
### Code conventions
|
||||||
|
|
||||||
|
- This is a fairly complex app with a deep component tree. Please use memoization (`useCallback`, `useMemo`, `memo`) with enthusiasm.
|
||||||
|
- If you need to add some global, ephemeral state, please use [nanostores] if possible.
|
||||||
|
- Be careful with your redux selectors. If they need to be parameterized, consider creating them inside a `useMemo`.
|
||||||
|
- Feel free to use `lodash` (via `lodash-es`) to make the intent of your code clear.
|
||||||
|
- Please add comments describing the "why", not the "how" (unless it is really arcane).
|
||||||
|
|
||||||
|
### Commit format
|
||||||
|
|
||||||
|
Please use the [conventional commits] spec for the web UI, with a scope of "ui":
|
||||||
|
|
||||||
|
- `chore(ui): bump deps`
|
||||||
|
- `chore(ui): lint`
|
||||||
|
- `feat(ui): add some cool new feature`
|
||||||
|
- `fix(ui): fix some bug`
|
||||||
|
|
||||||
|
### Submitting a PR
|
||||||
|
|
||||||
|
- Ensure your branch is tidy. Use an interactive rebase to clean up the commit history and reword the commit messages if they are not descriptive.
|
||||||
|
- Run `pnpm lint`. Some issues are auto-fixable with `pnpm fix`.
|
||||||
|
- Fill out the PR form when creating the PR.
|
||||||
|
- It doesn't need to be super detailed, but a screenshot or video is nice if you changed something visually.
|
||||||
|
- If a section isn't relevant, delete it. There are no UI tests at this time.
|
||||||
|
|
||||||
|
## Other docs
|
||||||
|
|
||||||
|
- [Workflows - Design and Implementation]
|
||||||
|
- [State Management]
|
||||||
|
|
||||||
|
[node]: https://nodejs.org/en/download/
|
||||||
|
[pnpm]: https://github.com/pnpm/pnpm
|
||||||
|
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||||
|
[i18next]: https://github.com/i18next/react-i18next
|
||||||
|
[Weblate]: https://hosted.weblate.org/engage/invokeai/
|
||||||
|
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
|
||||||
|
[Type generation]: #type-generation
|
||||||
|
[schema.ts]: ../src/services/api/schema.ts
|
||||||
|
[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0/
|
||||||
|
[Workflows - Design and Implementation]: ./docs/WORKFLOWS_DESIGN_IMPLEMENTATION.md
|
||||||
|
[State Management]: ./docs/STATE_MGMT.md
|
@ -22,12 +22,13 @@ export const packageConfig: UserConfig = {
|
|||||||
fileName: (format) => `invoke-ai-ui.${format}.js`,
|
fileName: (format) => `invoke-ai-ui.${format}.js`,
|
||||||
},
|
},
|
||||||
rollupOptions: {
|
rollupOptions: {
|
||||||
external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react'],
|
external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'],
|
||||||
output: {
|
output: {
|
||||||
globals: {
|
globals: {
|
||||||
react: 'React',
|
react: 'React',
|
||||||
'react-dom': 'ReactDOM',
|
'react-dom': 'ReactDOM',
|
||||||
'@emotion/react': 'EmotionReact',
|
'@emotion/react': 'EmotionReact',
|
||||||
|
'@invoke-ai/ui-library': 'UiLibrary',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -1,154 +0,0 @@
|
|||||||
# InvokeAI Web UI
|
|
||||||
|
|
||||||
<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->
|
|
||||||
|
|
||||||
<!-- code_chunk_output -->
|
|
||||||
|
|
||||||
- [InvokeAI Web UI](#invokeai-web-ui)
|
|
||||||
- [Core Libraries](#core-libraries)
|
|
||||||
- [Redux Toolkit](#redux-toolkit)
|
|
||||||
- [Socket\.IO](#socketio)
|
|
||||||
- [Chakra UI](#chakra-ui)
|
|
||||||
- [KonvaJS](#konvajs)
|
|
||||||
- [Vite](#vite)
|
|
||||||
- [i18next & Weblate](#i18next--weblate)
|
|
||||||
- [openapi-typescript](#openapi-typescript)
|
|
||||||
- [reactflow](#reactflow)
|
|
||||||
- [zod](#zod)
|
|
||||||
- [Client Types Generation](#client-types-generation)
|
|
||||||
- [Package Scripts](#package-scripts)
|
|
||||||
- [Contributing](#contributing)
|
|
||||||
- [Dev Environment](#dev-environment)
|
|
||||||
- [VSCode Remote Dev](#vscode-remote-dev)
|
|
||||||
- [Production builds](#production-builds)
|
|
||||||
|
|
||||||
<!-- /code_chunk_output -->
|
|
||||||
|
|
||||||
The UI is a fairly straightforward Typescript React app.
|
|
||||||
|
|
||||||
## Core Libraries
|
|
||||||
|
|
||||||
InvokeAI's UI is made possible by a number of excellent open-source libraries. The most heavily-used are listed below, but there are many others.
|
|
||||||
|
|
||||||
### Redux Toolkit
|
|
||||||
|
|
||||||
[Redux Toolkit] is used for state management and fetching/caching:
|
|
||||||
|
|
||||||
- `RTK-Query` for data fetching and caching
|
|
||||||
- `createAsyncThunk` for a couple other HTTP requests
|
|
||||||
- `createEntityAdapter` to normalize things like images and models
|
|
||||||
- `createListenerMiddleware` for async workflows
|
|
||||||
|
|
||||||
We use [redux-remember] for persistence.
|
|
||||||
|
|
||||||
### Socket\.IO
|
|
||||||
|
|
||||||
[Socket.IO] is used for server-to-client events, like generation process and queue state changes.
|
|
||||||
|
|
||||||
### Chakra UI
|
|
||||||
|
|
||||||
[Chakra UI] is our primary UI library, but we also use a few components from [Mantine v6].
|
|
||||||
|
|
||||||
### KonvaJS
|
|
||||||
|
|
||||||
[KonvaJS] powers the canvas. In the future, we'd like to explore [PixiJS] or WebGPU.
|
|
||||||
|
|
||||||
### Vite
|
|
||||||
|
|
||||||
[Vite] is our bundler.
|
|
||||||
|
|
||||||
### i18next & Weblate
|
|
||||||
|
|
||||||
We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project. **Only the English source strings should be changed on this repo.**
|
|
||||||
|
|
||||||
### openapi-typescript
|
|
||||||
|
|
||||||
[openapi-typescript] is used to generate types from the server's OpenAPI schema. See TYPES_CODEGEN.md.
|
|
||||||
|
|
||||||
### reactflow
|
|
||||||
|
|
||||||
[reactflow] powers the Workflow Editor.
|
|
||||||
|
|
||||||
### zod
|
|
||||||
|
|
||||||
[zod] schemas are used to model data structures and provide runtime validation.
|
|
||||||
|
|
||||||
## Client Types Generation
|
|
||||||
|
|
||||||
We use [openapi-typescript] to generate types from the app's OpenAPI schema.
|
|
||||||
|
|
||||||
The generated types are written to `invokeai/frontend/web/src/services/api/schema.d.ts`. This file is committed to the repo.
|
|
||||||
|
|
||||||
The server must be started and available at <http://127.0.0.1:9090>.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# from the repo root, start the server
|
|
||||||
python scripts/invokeai-web.py
|
|
||||||
# from invokeai/frontend/web/, run the script
|
|
||||||
pnpm typegen
|
|
||||||
```
|
|
||||||
|
|
||||||
## Package Scripts
|
|
||||||
|
|
||||||
See `package.json` for all scripts.
|
|
||||||
|
|
||||||
Run with `pnpm <script name>`.
|
|
||||||
|
|
||||||
- `dev`: run the frontend in dev mode, enabling hot reloading
|
|
||||||
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
|
|
||||||
- `typegen`: generate types from the OpenAPI schema (see [Client Types Generation](#client-types-generation))
|
|
||||||
- `lint:madge`: check frontend for circular dependencies
|
|
||||||
- `lint:eslint`: check frontend for code quality
|
|
||||||
- `lint:prettier`: check frontend for code formatting
|
|
||||||
- `lint:tsc`: check frontend for type issues
|
|
||||||
- `lint`: run all checks concurrently
|
|
||||||
- `fix`: run `eslint` and `prettier`, fixing fixable issues
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Thanks for your interest in contributing to the InvokeAI Web UI!
|
|
||||||
|
|
||||||
We encourage you to ping @psychedelicious and @blessedcoolant on [discord] if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
|
|
||||||
|
|
||||||
### Dev Environment
|
|
||||||
|
|
||||||
Install [node] and [pnpm].
|
|
||||||
|
|
||||||
From `invokeai/frontend/web/` run `pnpm i` to get everything set up.
|
|
||||||
|
|
||||||
Start everything in dev mode:
|
|
||||||
|
|
||||||
1. Start the dev server: `pnpm dev`
|
|
||||||
2. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
|
|
||||||
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
|
|
||||||
|
|
||||||
#### VSCode Remote Dev
|
|
||||||
|
|
||||||
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. Suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
|
|
||||||
|
|
||||||
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
|
|
||||||
|
|
||||||
### Production builds
|
|
||||||
|
|
||||||
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
|
|
||||||
|
|
||||||
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
|
|
||||||
|
|
||||||
To build for production, run `pnpm build`.
|
|
||||||
|
|
||||||
[node]: https://nodejs.org/en/download/
|
|
||||||
[pnpm]: https://github.com/pnpm/pnpm
|
|
||||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
|
||||||
[Redux Toolkit]: https://github.com/reduxjs/redux-toolkit
|
|
||||||
[redux-remember]: https://github.com/zewish/redux-remember
|
|
||||||
[Socket.IO]: https://github.com/socketio/socket.io
|
|
||||||
[Chakra UI]: https://github.com/chakra-ui/chakra-ui
|
|
||||||
[Mantine v6]: https://v6.mantine.dev/
|
|
||||||
[KonvaJS]: https://github.com/konvajs/react-konva
|
|
||||||
[PixiJS]: https://github.com/pixijs/pixijs
|
|
||||||
[Vite]: https://github.com/vitejs/vite
|
|
||||||
[i18next]: https://github.com/i18next/react-i18next
|
|
||||||
[Weblate]: https://hosted.weblate.org/engage/invokeai/
|
|
||||||
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
|
|
||||||
[reactflow]: https://github.com/xyflow/xyflow
|
|
||||||
[zod]: https://github.com/colinhacks/zod
|
|
38
invokeai/frontend/web/docs/STATE_MGMT.md
Normal file
38
invokeai/frontend/web/docs/STATE_MGMT.md
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
# State Management
|
||||||
|
|
||||||
|
The app makes heavy use of Redux Toolkit, its Query library, and `nanostores`.
|
||||||
|
|
||||||
|
## Redux
|
||||||
|
|
||||||
|
TODO
|
||||||
|
|
||||||
|
## `nanostores`
|
||||||
|
|
||||||
|
[nanostores] is a tiny state management library. It provides both imperative and declarative APIs.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```ts
|
||||||
|
export const $myStringOption = atom<string | null>(null);
|
||||||
|
|
||||||
|
// Outside a component, or within a callback for performance-critical logic
|
||||||
|
$myStringOption.get();
|
||||||
|
$myStringOption.set('new value');
|
||||||
|
|
||||||
|
// Inside a component
|
||||||
|
const myStringOption = useStore($myStringOption);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Where to put nanostores
|
||||||
|
|
||||||
|
- For global application state, export your stores from `invokeai/frontend/web/src/app/store/nanostores/`.
|
||||||
|
- For feature state, create a file for the stores next to the redux slice definition (e.g. `invokeai/frontend/web/src/features/myFeature/myFeatureNanostores.ts`).
|
||||||
|
- For hooks with global state, export the store from the same file the hook is in, or put it next to the hook.
|
||||||
|
|
||||||
|
### When to use nanostores
|
||||||
|
|
||||||
|
- For non-serializable data that needs to be available throughout the app, use `nanostores` instead of a global.
|
||||||
|
- For ephemeral global state (i.e. state that does not need to be persisted), use `nanostores` instead of redux.
|
||||||
|
- For performance-critical code and in callbacks, redux selectors can be problematic due to the declarative reactivity system. Consider refactoring to use `nanostores` if there's a **measurable** performance issue.
|
||||||
|
|
||||||
|
[nanostores]: https://github.com/nanostores/nanostores/
|
@ -23,7 +23,7 @@
|
|||||||
- [Primitive Types](#primitive-types)
|
- [Primitive Types](#primitive-types)
|
||||||
- [Complex Types](#complex-types)
|
- [Complex Types](#complex-types)
|
||||||
- [Collection Types](#collection-types)
|
- [Collection Types](#collection-types)
|
||||||
- [Polymorphic Types](#polymorphic-types)
|
- [Collection or Scalar Types](#collection-or-scalar-types)
|
||||||
- [Optional Fields](#optional-fields)
|
- [Optional Fields](#optional-fields)
|
||||||
- [Building Field Input Templates](#building-field-input-templates)
|
- [Building Field Input Templates](#building-field-input-templates)
|
||||||
- [Building Field Output Templates](#building-field-output-templates)
|
- [Building Field Output Templates](#building-field-output-templates)
|
||||||
|
@ -19,8 +19,8 @@
|
|||||||
"dist"
|
"dist"
|
||||||
],
|
],
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"dev": "concurrently \"vite dev\" \"pnpm run theme:watch\"",
|
"dev": "vite dev",
|
||||||
"dev:host": "concurrently \"vite dev --host\" \"pnpm run theme:watch\"",
|
"dev:host": "vite dev --host",
|
||||||
"build": "pnpm run lint && vite build",
|
"build": "pnpm run lint && vite build",
|
||||||
"typegen": "node scripts/typegen.js",
|
"typegen": "node scripts/typegen.js",
|
||||||
"preview": "vite preview",
|
"preview": "vite preview",
|
||||||
@ -31,9 +31,6 @@
|
|||||||
"lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
|
"lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
|
||||||
"fix": "eslint --fix . && prettier --log-level warn --write .",
|
"fix": "eslint --fix . && prettier --log-level warn --write .",
|
||||||
"preinstall": "npx only-allow pnpm",
|
"preinstall": "npx only-allow pnpm",
|
||||||
"postinstall": "pnpm run theme",
|
|
||||||
"theme": "chakra-cli tokens node_modules/@invoke-ai/ui",
|
|
||||||
"theme:watch": "chakra-cli tokens node_modules/@invoke-ai/ui --watch",
|
|
||||||
"storybook": "storybook dev -p 6006",
|
"storybook": "storybook dev -p 6006",
|
||||||
"build-storybook": "storybook build",
|
"build-storybook": "storybook build",
|
||||||
"unimported": "npx unimported"
|
"unimported": "npx unimported"
|
||||||
@ -52,21 +49,12 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@chakra-ui/anatomy": "^2.2.2",
|
|
||||||
"@chakra-ui/icons": "^2.1.1",
|
|
||||||
"@chakra-ui/layout": "^2.3.1",
|
|
||||||
"@chakra-ui/portal": "^2.1.0",
|
|
||||||
"@chakra-ui/react": "^2.8.2",
|
|
||||||
"@chakra-ui/react-use-size": "^2.1.0",
|
"@chakra-ui/react-use-size": "^2.1.0",
|
||||||
"@chakra-ui/styled-system": "^2.9.2",
|
|
||||||
"@chakra-ui/theme-tools": "^2.1.2",
|
|
||||||
"@dagrejs/graphlib": "^2.1.13",
|
"@dagrejs/graphlib": "^2.1.13",
|
||||||
"@dnd-kit/core": "^6.1.0",
|
"@dnd-kit/core": "^6.1.0",
|
||||||
"@dnd-kit/utilities": "^3.2.2",
|
"@dnd-kit/utilities": "^3.2.2",
|
||||||
"@emotion/react": "^11.11.3",
|
|
||||||
"@emotion/styled": "^11.11.0",
|
|
||||||
"@fontsource-variable/inter": "^5.0.16",
|
"@fontsource-variable/inter": "^5.0.16",
|
||||||
"@invoke-ai/ui": "0.0.10",
|
"@invoke-ai/ui-library": "^0.0.18",
|
||||||
"@mantine/form": "6.0.21",
|
"@mantine/form": "6.0.21",
|
||||||
"@nanostores/react": "^0.7.1",
|
"@nanostores/react": "^0.7.1",
|
||||||
"@reduxjs/toolkit": "2.0.1",
|
"@reduxjs/toolkit": "2.0.1",
|
||||||
@ -116,7 +104,6 @@
|
|||||||
"zod-validation-error": "^3.0.0"
|
"zod-validation-error": "^3.0.0"
|
||||||
},
|
},
|
||||||
"peerDependencies": {
|
"peerDependencies": {
|
||||||
"@chakra-ui/cli": "^2.4.1",
|
|
||||||
"@chakra-ui/react": "^2.8.2",
|
"@chakra-ui/react": "^2.8.2",
|
||||||
"react": "^18.2.0",
|
"react": "^18.2.0",
|
||||||
"react-dom": "^18.2.0",
|
"react-dom": "^18.2.0",
|
||||||
@ -124,7 +111,8 @@
|
|||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
|
"@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
|
||||||
"@chakra-ui/cli": "^2.4.1",
|
"@invoke-ai/eslint-config-react": "^0.0.13",
|
||||||
|
"@invoke-ai/prettier-config-react": "^0.0.6",
|
||||||
"@storybook/addon-docs": "^7.6.10",
|
"@storybook/addon-docs": "^7.6.10",
|
||||||
"@storybook/addon-essentials": "^7.6.10",
|
"@storybook/addon-essentials": "^7.6.10",
|
||||||
"@storybook/addon-interactions": "^7.6.10",
|
"@storybook/addon-interactions": "^7.6.10",
|
||||||
@ -164,7 +152,7 @@
|
|||||||
"storybook": "^7.6.10",
|
"storybook": "^7.6.10",
|
||||||
"ts-toolbelt": "^9.6.0",
|
"ts-toolbelt": "^9.6.0",
|
||||||
"typescript": "^5.3.3",
|
"typescript": "^5.3.3",
|
||||||
"vite": "^5.0.11",
|
"vite": "^5.0.12",
|
||||||
"vite-plugin-css-injected-by-js": "^3.3.1",
|
"vite-plugin-css-injected-by-js": "^3.3.1",
|
||||||
"vite-plugin-dts": "^3.7.1",
|
"vite-plugin-dts": "^3.7.1",
|
||||||
"vite-plugin-eslint": "^1.8.1",
|
"vite-plugin-eslint": "^1.8.1",
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -98,7 +98,7 @@
|
|||||||
"outputs": "Ausgabe",
|
"outputs": "Ausgabe",
|
||||||
"data": "Daten",
|
"data": "Daten",
|
||||||
"safetensors": "Safetensors",
|
"safetensors": "Safetensors",
|
||||||
"outpaint": "outpaint",
|
"outpaint": "Ausmalen",
|
||||||
"details": "Details",
|
"details": "Details",
|
||||||
"format": "Format",
|
"format": "Format",
|
||||||
"unknown": "Unbekannt",
|
"unknown": "Unbekannt",
|
||||||
@ -110,7 +110,29 @@
|
|||||||
"somethingWentWrong": "Etwas ist schief gelaufen",
|
"somethingWentWrong": "Etwas ist schief gelaufen",
|
||||||
"copyError": "$t(gallery.copy) Fehler",
|
"copyError": "$t(gallery.copy) Fehler",
|
||||||
"input": "Eingabe",
|
"input": "Eingabe",
|
||||||
"notInstalled": "Nicht $t(common.installed)"
|
"notInstalled": "Nicht $t(common.installed)",
|
||||||
|
"advancedOptions": "Erweiterte Einstellungen",
|
||||||
|
"alpha": "Alpha",
|
||||||
|
"red": "Rot",
|
||||||
|
"green": "Grün",
|
||||||
|
"blue": "Blau",
|
||||||
|
"delete": "Löschen",
|
||||||
|
"or": "oder",
|
||||||
|
"direction": "Richtung",
|
||||||
|
"free": "Frei",
|
||||||
|
"save": "Speichern",
|
||||||
|
"preferencesLabel": "Präferenzen",
|
||||||
|
"created": "Erstellt",
|
||||||
|
"prevPage": "Vorherige Seite",
|
||||||
|
"nextPage": "Nächste Seite",
|
||||||
|
"unknownError": "Unbekannter Fehler",
|
||||||
|
"unsaved": "Nicht gespeichert",
|
||||||
|
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:",
|
||||||
|
"localSystem": "Lokales System",
|
||||||
|
"orderBy": "Ordnen nach",
|
||||||
|
"saveAs": "Speicher als",
|
||||||
|
"updated": "Aktualisiert",
|
||||||
|
"copy": "Kopieren"
|
||||||
},
|
},
|
||||||
"gallery": {
|
"gallery": {
|
||||||
"generations": "Erzeugungen",
|
"generations": "Erzeugungen",
|
||||||
@ -140,7 +162,13 @@
|
|||||||
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
|
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
|
||||||
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
|
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
|
||||||
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
|
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
|
||||||
"noImageSelected": "Kein Bild ausgewählt"
|
"noImageSelected": "Kein Bild ausgewählt",
|
||||||
|
"problemDeletingImagesDesc": "Eins oder mehr Bilder könnten nicht gelöscht werden",
|
||||||
|
"starImage": "Bild markieren",
|
||||||
|
"assets": "Ressourcen",
|
||||||
|
"unstarImage": "Markierung Entfernen",
|
||||||
|
"image": "Bild",
|
||||||
|
"deleteSelection": "Lösche markierte"
|
||||||
},
|
},
|
||||||
"hotkeys": {
|
"hotkeys": {
|
||||||
"keyboardShortcuts": "Tastenkürzel",
|
"keyboardShortcuts": "Tastenkürzel",
|
||||||
@ -344,7 +372,13 @@
|
|||||||
"addNodes": {
|
"addNodes": {
|
||||||
"title": "Knotenpunkt hinzufügen",
|
"title": "Knotenpunkt hinzufügen",
|
||||||
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
|
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
|
||||||
}
|
},
|
||||||
|
"cancelAndClear": {
|
||||||
|
"title": "Abbruch und leeren"
|
||||||
|
},
|
||||||
|
"noHotkeysFound": "Kein Hotkey gefunden",
|
||||||
|
"searchHotkeys": "Hotkeys durchsuchen",
|
||||||
|
"clearSearch": "Suche leeren"
|
||||||
},
|
},
|
||||||
"modelManager": {
|
"modelManager": {
|
||||||
"modelAdded": "Model hinzugefügt",
|
"modelAdded": "Model hinzugefügt",
|
||||||
@ -701,7 +735,8 @@
|
|||||||
"invokeProgressBar": "Invoke Fortschrittsanzeige",
|
"invokeProgressBar": "Invoke Fortschrittsanzeige",
|
||||||
"mode": "Modus",
|
"mode": "Modus",
|
||||||
"resetUI": "$t(accessibility.reset) von UI",
|
"resetUI": "$t(accessibility.reset) von UI",
|
||||||
"createIssue": "Ticket erstellen"
|
"createIssue": "Ticket erstellen",
|
||||||
|
"about": "Über"
|
||||||
},
|
},
|
||||||
"boards": {
|
"boards": {
|
||||||
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
|
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
|
||||||
@ -809,7 +844,14 @@
|
|||||||
"canny": "Canny",
|
"canny": "Canny",
|
||||||
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
|
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
|
||||||
"scribble": "Scribble",
|
"scribble": "Scribble",
|
||||||
"maxFaces": "Maximal Anzahl Gesichter"
|
"maxFaces": "Maximal Anzahl Gesichter",
|
||||||
|
"resizeSimple": "Größe ändern (einfach)",
|
||||||
|
"large": "Groß",
|
||||||
|
"modelSize": "Modell Größe",
|
||||||
|
"small": "Klein",
|
||||||
|
"base": "Basis",
|
||||||
|
"depthAnything": "Depth Anything",
|
||||||
|
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
|
||||||
},
|
},
|
||||||
"queue": {
|
"queue": {
|
||||||
"status": "Status",
|
"status": "Status",
|
||||||
@ -842,7 +884,7 @@
|
|||||||
"item": "Auftrag",
|
"item": "Auftrag",
|
||||||
"notReady": "Warteschlange noch nicht bereit",
|
"notReady": "Warteschlange noch nicht bereit",
|
||||||
"batchValues": "Stapel Werte",
|
"batchValues": "Stapel Werte",
|
||||||
"queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
|
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
|
||||||
"queuedCount": "{{pending}} wartenden Elemente",
|
"queuedCount": "{{pending}} wartenden Elemente",
|
||||||
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||||
"completedIn": "Fertig in",
|
"completedIn": "Fertig in",
|
||||||
@ -864,7 +906,9 @@
|
|||||||
"back": "Hinten",
|
"back": "Hinten",
|
||||||
"resumeSucceeded": "Prozessor wieder aufgenommen",
|
"resumeSucceeded": "Prozessor wieder aufgenommen",
|
||||||
"resumeTooltip": "Prozessor wieder aufnehmen",
|
"resumeTooltip": "Prozessor wieder aufnehmen",
|
||||||
"time": "Zeit"
|
"time": "Zeit",
|
||||||
|
"batchQueuedDesc_one": "{{count}} Eintrage ans {{direction}} der Wartschlange hinzugefügt",
|
||||||
|
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt"
|
||||||
},
|
},
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"negativePrompt": "Negativ Beschreibung",
|
"negativePrompt": "Negativ Beschreibung",
|
||||||
@ -933,7 +977,8 @@
|
|||||||
"enable": "Aktivieren",
|
"enable": "Aktivieren",
|
||||||
"clear": "Leeren",
|
"clear": "Leeren",
|
||||||
"maxCacheSize": "Maximale Cache Größe",
|
"maxCacheSize": "Maximale Cache Größe",
|
||||||
"cacheSize": "Cache Größe"
|
"cacheSize": "Cache Größe",
|
||||||
|
"useCache": "Benutze Cache"
|
||||||
},
|
},
|
||||||
"embedding": {
|
"embedding": {
|
||||||
"noMatchingEmbedding": "Keine passenden Embeddings",
|
"noMatchingEmbedding": "Keine passenden Embeddings",
|
||||||
@ -999,5 +1044,28 @@
|
|||||||
"selectLoRA": "Wählen ein LoRA aus",
|
"selectLoRA": "Wählen ein LoRA aus",
|
||||||
"esrganModel": "ESRGAN Modell",
|
"esrganModel": "ESRGAN Modell",
|
||||||
"addLora": "LoRA hinzufügen"
|
"addLora": "LoRA hinzufügen"
|
||||||
|
},
|
||||||
|
"accordions": {
|
||||||
|
"generation": {
|
||||||
|
"title": "Erstellung",
|
||||||
|
"modelTab": "Modell",
|
||||||
|
"conceptsTab": "Konzepte"
|
||||||
|
},
|
||||||
|
"image": {
|
||||||
|
"title": "Bild"
|
||||||
|
},
|
||||||
|
"advanced": {
|
||||||
|
"title": "Erweitert"
|
||||||
|
},
|
||||||
|
"control": {
|
||||||
|
"title": "Kontrolle",
|
||||||
|
"controlAdaptersTab": "Kontroll Adapter",
|
||||||
|
"ipTab": "Bild Beschreibung"
|
||||||
|
},
|
||||||
|
"compositing": {
|
||||||
|
"coherenceTab": "Kohärenzpass",
|
||||||
|
"infillTab": "Füllung",
|
||||||
|
"title": "Compositing"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -86,6 +86,7 @@
|
|||||||
"back": "Back",
|
"back": "Back",
|
||||||
"batch": "Batch Manager",
|
"batch": "Batch Manager",
|
||||||
"cancel": "Cancel",
|
"cancel": "Cancel",
|
||||||
|
"copy": "Copy",
|
||||||
"copyError": "$t(gallery.copy) Error",
|
"copyError": "$t(gallery.copy) Error",
|
||||||
"close": "Close",
|
"close": "Close",
|
||||||
"on": "On",
|
"on": "On",
|
||||||
@ -224,6 +225,7 @@
|
|||||||
"amult": "a_mult",
|
"amult": "a_mult",
|
||||||
"autoConfigure": "Auto configure processor",
|
"autoConfigure": "Auto configure processor",
|
||||||
"balanced": "Balanced",
|
"balanced": "Balanced",
|
||||||
|
"base": "Base",
|
||||||
"beginEndStepPercent": "Begin / End Step Percentage",
|
"beginEndStepPercent": "Begin / End Step Percentage",
|
||||||
"bgth": "bg_th",
|
"bgth": "bg_th",
|
||||||
"canny": "Canny",
|
"canny": "Canny",
|
||||||
@ -237,6 +239,8 @@
|
|||||||
"controlMode": "Control Mode",
|
"controlMode": "Control Mode",
|
||||||
"crop": "Crop",
|
"crop": "Crop",
|
||||||
"delete": "Delete",
|
"delete": "Delete",
|
||||||
|
"depthAnything": "Depth Anything",
|
||||||
|
"depthAnythingDescription": "Depth map generation using the Depth Anything technique",
|
||||||
"depthMidas": "Depth (Midas)",
|
"depthMidas": "Depth (Midas)",
|
||||||
"depthMidasDescription": "Depth map generation using Midas",
|
"depthMidasDescription": "Depth map generation using Midas",
|
||||||
"depthZoe": "Depth (Zoe)",
|
"depthZoe": "Depth (Zoe)",
|
||||||
@ -256,6 +260,7 @@
|
|||||||
"colorMapTileSize": "Tile Size",
|
"colorMapTileSize": "Tile Size",
|
||||||
"importImageFromCanvas": "Import Image From Canvas",
|
"importImageFromCanvas": "Import Image From Canvas",
|
||||||
"importMaskFromCanvas": "Import Mask From Canvas",
|
"importMaskFromCanvas": "Import Mask From Canvas",
|
||||||
|
"large": "Large",
|
||||||
"lineart": "Lineart",
|
"lineart": "Lineart",
|
||||||
"lineartAnime": "Lineart Anime",
|
"lineartAnime": "Lineart Anime",
|
||||||
"lineartAnimeDescription": "Anime-style lineart processing",
|
"lineartAnimeDescription": "Anime-style lineart processing",
|
||||||
@ -268,6 +273,7 @@
|
|||||||
"minConfidence": "Min Confidence",
|
"minConfidence": "Min Confidence",
|
||||||
"mlsd": "M-LSD",
|
"mlsd": "M-LSD",
|
||||||
"mlsdDescription": "Minimalist Line Segment Detector",
|
"mlsdDescription": "Minimalist Line Segment Detector",
|
||||||
|
"modelSize": "Model Size",
|
||||||
"none": "None",
|
"none": "None",
|
||||||
"noneDescription": "No processing applied",
|
"noneDescription": "No processing applied",
|
||||||
"normalBae": "Normal BAE",
|
"normalBae": "Normal BAE",
|
||||||
@ -288,6 +294,7 @@
|
|||||||
"selectModel": "Select a model",
|
"selectModel": "Select a model",
|
||||||
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
|
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
|
||||||
"showAdvanced": "Show Advanced",
|
"showAdvanced": "Show Advanced",
|
||||||
|
"small": "Small",
|
||||||
"toggleControlNet": "Toggle this ControlNet",
|
"toggleControlNet": "Toggle this ControlNet",
|
||||||
"w": "W",
|
"w": "W",
|
||||||
"weight": "Weight",
|
"weight": "Weight",
|
||||||
@ -600,6 +607,10 @@
|
|||||||
"desc": "Send current image to Image to Image",
|
"desc": "Send current image to Image to Image",
|
||||||
"title": "Send To Image To Image"
|
"title": "Send To Image To Image"
|
||||||
},
|
},
|
||||||
|
"remixImage": {
|
||||||
|
"desc": "Use all parameters except seed from the current image",
|
||||||
|
"title": "Remix image"
|
||||||
|
},
|
||||||
"setParameters": {
|
"setParameters": {
|
||||||
"desc": "Use all parameters of the current image",
|
"desc": "Use all parameters of the current image",
|
||||||
"title": "Set Parameters"
|
"title": "Set Parameters"
|
||||||
@ -1003,6 +1014,9 @@
|
|||||||
"newWorkflow": "New Workflow",
|
"newWorkflow": "New Workflow",
|
||||||
"newWorkflowDesc": "Create a new workflow?",
|
"newWorkflowDesc": "Create a new workflow?",
|
||||||
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
|
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
|
||||||
|
"clearWorkflow": "Clear Workflow",
|
||||||
|
"clearWorkflowDesc": "Clear this workflow and start a new one?",
|
||||||
|
"clearWorkflowDesc2": "Your current workflow has unsaved changes.",
|
||||||
"scheduler": "Scheduler",
|
"scheduler": "Scheduler",
|
||||||
"schedulerDescription": "TODO",
|
"schedulerDescription": "TODO",
|
||||||
"sDXLMainModelField": "SDXL Model",
|
"sDXLMainModelField": "SDXL Model",
|
||||||
@ -1216,6 +1230,7 @@
|
|||||||
"useCpuNoise": "Use CPU Noise",
|
"useCpuNoise": "Use CPU Noise",
|
||||||
"cpuNoise": "CPU Noise",
|
"cpuNoise": "CPU Noise",
|
||||||
"gpuNoise": "GPU Noise",
|
"gpuNoise": "GPU Noise",
|
||||||
|
"remixImage": "Remix Image",
|
||||||
"useInitImg": "Use Initial Image",
|
"useInitImg": "Use Initial Image",
|
||||||
"usePrompt": "Use Prompt",
|
"usePrompt": "Use Prompt",
|
||||||
"useSeed": "Use Seed",
|
"useSeed": "Use Seed",
|
||||||
@ -1361,6 +1376,7 @@
|
|||||||
"problemCopyingCanvasDesc": "Unable to export base layer",
|
"problemCopyingCanvasDesc": "Unable to export base layer",
|
||||||
"problemCopyingImage": "Unable to Copy Image",
|
"problemCopyingImage": "Unable to Copy Image",
|
||||||
"problemCopyingImageLink": "Unable to Copy Image Link",
|
"problemCopyingImageLink": "Unable to Copy Image Link",
|
||||||
|
"problemDownloadingImage": "Unable to Download Image",
|
||||||
"problemDownloadingCanvas": "Problem Downloading Canvas",
|
"problemDownloadingCanvas": "Problem Downloading Canvas",
|
||||||
"problemDownloadingCanvasDesc": "Unable to export base layer",
|
"problemDownloadingCanvasDesc": "Unable to export base layer",
|
||||||
"problemImportingMask": "Problem Importing Mask",
|
"problemImportingMask": "Problem Importing Mask",
|
||||||
@ -1452,9 +1468,7 @@
|
|||||||
},
|
},
|
||||||
"compositingCoherencePass": {
|
"compositingCoherencePass": {
|
||||||
"heading": "Coherence Pass",
|
"heading": "Coherence Pass",
|
||||||
"paragraphs": [
|
"paragraphs": ["A second round of denoising helps to composite the Inpainted/Outpainted image."]
|
||||||
"A second round of denoising helps to composite the Inpainted/Outpainted image."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"compositingCoherenceMode": {
|
"compositingCoherenceMode": {
|
||||||
"heading": "Mode",
|
"heading": "Mode",
|
||||||
@ -1462,10 +1476,7 @@
|
|||||||
},
|
},
|
||||||
"compositingCoherenceSteps": {
|
"compositingCoherenceSteps": {
|
||||||
"heading": "Steps",
|
"heading": "Steps",
|
||||||
"paragraphs": [
|
"paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
|
||||||
"Number of denoising steps used in the Coherence Pass.",
|
|
||||||
"Same as the main Steps parameter."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"compositingStrength": {
|
"compositingStrength": {
|
||||||
"heading": "Strength",
|
"heading": "Strength",
|
||||||
@ -1487,15 +1498,11 @@
|
|||||||
},
|
},
|
||||||
"controlNetControlMode": {
|
"controlNetControlMode": {
|
||||||
"heading": "Control Mode",
|
"heading": "Control Mode",
|
||||||
"paragraphs": [
|
"paragraphs": ["Lends more weight to either the prompt or ControlNet."]
|
||||||
"Lends more weight to either the prompt or ControlNet."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"controlNetResizeMode": {
|
"controlNetResizeMode": {
|
||||||
"heading": "Resize Mode",
|
"heading": "Resize Mode",
|
||||||
"paragraphs": [
|
"paragraphs": ["How the ControlNet image will be fit to the image output size."]
|
||||||
"How the ControlNet image will be fit to the image output size."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"controlNet": {
|
"controlNet": {
|
||||||
"heading": "ControlNet",
|
"heading": "ControlNet",
|
||||||
@ -1505,9 +1512,7 @@
|
|||||||
},
|
},
|
||||||
"controlNetWeight": {
|
"controlNetWeight": {
|
||||||
"heading": "Weight",
|
"heading": "Weight",
|
||||||
"paragraphs": [
|
"paragraphs": ["How strongly the ControlNet will impact the generated image."]
|
||||||
"How strongly the ControlNet will impact the generated image."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"dynamicPrompts": {
|
"dynamicPrompts": {
|
||||||
"heading": "Dynamic Prompts",
|
"heading": "Dynamic Prompts",
|
||||||
@ -1519,9 +1524,7 @@
|
|||||||
},
|
},
|
||||||
"dynamicPromptsMaxPrompts": {
|
"dynamicPromptsMaxPrompts": {
|
||||||
"heading": "Max Prompts",
|
"heading": "Max Prompts",
|
||||||
"paragraphs": [
|
"paragraphs": ["Limits the number of prompts that can be generated by Dynamic Prompts."]
|
||||||
"Limits the number of prompts that can be generated by Dynamic Prompts."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"dynamicPromptsSeedBehaviour": {
|
"dynamicPromptsSeedBehaviour": {
|
||||||
"heading": "Seed Behaviour",
|
"heading": "Seed Behaviour",
|
||||||
@ -1538,9 +1541,7 @@
|
|||||||
},
|
},
|
||||||
"lora": {
|
"lora": {
|
||||||
"heading": "LoRA Weight",
|
"heading": "LoRA Weight",
|
||||||
"paragraphs": [
|
"paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
|
||||||
"Higher LoRA weight will lead to larger impacts on the final image."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"noiseUseCPU": {
|
"noiseUseCPU": {
|
||||||
"heading": "Use CPU Noise",
|
"heading": "Use CPU Noise",
|
||||||
@ -1552,9 +1553,7 @@
|
|||||||
},
|
},
|
||||||
"paramCFGScale": {
|
"paramCFGScale": {
|
||||||
"heading": "CFG Scale",
|
"heading": "CFG Scale",
|
||||||
"paragraphs": [
|
"paragraphs": ["Controls how much your prompt influences the generation process."]
|
||||||
"Controls how much your prompt influences the generation process."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"paramCFGRescaleMultiplier": {
|
"paramCFGRescaleMultiplier": {
|
||||||
"heading": "CFG Rescale Multiplier",
|
"heading": "CFG Rescale Multiplier",
|
||||||
@ -1606,9 +1605,7 @@
|
|||||||
},
|
},
|
||||||
"paramVAE": {
|
"paramVAE": {
|
||||||
"heading": "VAE",
|
"heading": "VAE",
|
||||||
"paragraphs": [
|
"paragraphs": ["Model used for translating AI output into the final image."]
|
||||||
"Model used for translating AI output into the final image."
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"paramVAEPrecision": {
|
"paramVAEPrecision": {
|
||||||
"heading": "VAE Precision",
|
"heading": "VAE Precision",
|
||||||
@ -1697,6 +1694,7 @@
|
|||||||
"workflowLibrary": "Library",
|
"workflowLibrary": "Library",
|
||||||
"userWorkflows": "My Workflows",
|
"userWorkflows": "My Workflows",
|
||||||
"defaultWorkflows": "Default Workflows",
|
"defaultWorkflows": "Default Workflows",
|
||||||
|
"projectWorkflows": "Project Workflows",
|
||||||
"openWorkflow": "Open Workflow",
|
"openWorkflow": "Open Workflow",
|
||||||
"uploadWorkflow": "Load from File",
|
"uploadWorkflow": "Load from File",
|
||||||
"deleteWorkflow": "Delete Workflow",
|
"deleteWorkflow": "Delete Workflow",
|
||||||
@ -1704,11 +1702,13 @@
|
|||||||
"downloadWorkflow": "Save to File",
|
"downloadWorkflow": "Save to File",
|
||||||
"saveWorkflow": "Save Workflow",
|
"saveWorkflow": "Save Workflow",
|
||||||
"saveWorkflowAs": "Save Workflow As",
|
"saveWorkflowAs": "Save Workflow As",
|
||||||
|
"saveWorkflowToProject": "Save Workflow to Project",
|
||||||
"savingWorkflow": "Saving Workflow...",
|
"savingWorkflow": "Saving Workflow...",
|
||||||
"problemSavingWorkflow": "Problem Saving Workflow",
|
"problemSavingWorkflow": "Problem Saving Workflow",
|
||||||
"workflowSaved": "Workflow Saved",
|
"workflowSaved": "Workflow Saved",
|
||||||
"noRecentWorkflows": "No Recent Workflows",
|
"noRecentWorkflows": "No Recent Workflows",
|
||||||
"noUserWorkflows": "No User Workflows",
|
"noUserWorkflows": "No User Workflows",
|
||||||
|
"noWorkflows": "No Workflows",
|
||||||
"noSystemWorkflows": "No System Workflows",
|
"noSystemWorkflows": "No System Workflows",
|
||||||
"problemLoading": "Problem Loading Workflows",
|
"problemLoading": "Problem Loading Workflows",
|
||||||
"loading": "Loading Workflows",
|
"loading": "Loading Workflows",
|
||||||
@ -1717,6 +1717,7 @@
|
|||||||
"clearWorkflowSearchFilter": "Clear Workflow Search Filter",
|
"clearWorkflowSearchFilter": "Clear Workflow Search Filter",
|
||||||
"workflowName": "Workflow Name",
|
"workflowName": "Workflow Name",
|
||||||
"newWorkflowCreated": "New Workflow Created",
|
"newWorkflowCreated": "New Workflow Created",
|
||||||
|
"workflowCleared": "Workflow Cleared",
|
||||||
"workflowEditorMenu": "Workflow Editor Menu",
|
"workflowEditorMenu": "Workflow Editor Menu",
|
||||||
"workflowIsOpen": "Workflow is Open"
|
"workflowIsOpen": "Workflow is Open"
|
||||||
},
|
},
|
||||||
|
@ -118,7 +118,15 @@
|
|||||||
"advancedOptions": "Opzioni avanzate",
|
"advancedOptions": "Opzioni avanzate",
|
||||||
"free": "Libero",
|
"free": "Libero",
|
||||||
"or": "o",
|
"or": "o",
|
||||||
"preferencesLabel": "Preferenze"
|
"preferencesLabel": "Preferenze",
|
||||||
|
"red": "Rosso",
|
||||||
|
"aboutHeading": "Possiedi il tuo potere creativo",
|
||||||
|
"aboutDesc": "Utilizzi Invoke per lavoro? Guarda qui:",
|
||||||
|
"localSystem": "Sistema locale",
|
||||||
|
"green": "Verde",
|
||||||
|
"blue": "Blu",
|
||||||
|
"alpha": "Alfa",
|
||||||
|
"copy": "Copia"
|
||||||
},
|
},
|
||||||
"gallery": {
|
"gallery": {
|
||||||
"generations": "Generazioni",
|
"generations": "Generazioni",
|
||||||
@ -377,7 +385,11 @@
|
|||||||
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
|
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
|
||||||
"title": "Attiva/disattiva le Opzioni e la Galleria"
|
"title": "Attiva/disattiva le Opzioni e la Galleria"
|
||||||
},
|
},
|
||||||
"clearSearch": "Cancella ricerca"
|
"clearSearch": "Cancella ricerca",
|
||||||
|
"remixImage": {
|
||||||
|
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
|
||||||
|
"title": "Remixa l'immagine"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"modelManager": {
|
"modelManager": {
|
||||||
"modelManager": "Gestione Modelli",
|
"modelManager": "Gestione Modelli",
|
||||||
@ -521,7 +533,8 @@
|
|||||||
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
|
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
|
||||||
"vaePrecision": "Precisione VAE",
|
"vaePrecision": "Precisione VAE",
|
||||||
"noModelSelected": "Nessun modello selezionato",
|
"noModelSelected": "Nessun modello selezionato",
|
||||||
"conversionNotSupported": "Conversione non supportata"
|
"conversionNotSupported": "Conversione non supportata",
|
||||||
|
"configFile": "File di configurazione"
|
||||||
},
|
},
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"images": "Immagini",
|
"images": "Immagini",
|
||||||
@ -660,7 +673,10 @@
|
|||||||
"lockAspectRatio": "Blocca proporzioni",
|
"lockAspectRatio": "Blocca proporzioni",
|
||||||
"swapDimensions": "Scambia dimensioni",
|
"swapDimensions": "Scambia dimensioni",
|
||||||
"aspect": "Aspetto",
|
"aspect": "Aspetto",
|
||||||
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)"
|
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)",
|
||||||
|
"boxBlur": "Box",
|
||||||
|
"gaussianBlur": "Gaussian",
|
||||||
|
"remixImage": "Remixa l'immagine"
|
||||||
},
|
},
|
||||||
"settings": {
|
"settings": {
|
||||||
"models": "Modelli",
|
"models": "Modelli",
|
||||||
@ -794,7 +810,9 @@
|
|||||||
"invalidUpload": "Caricamento non valido",
|
"invalidUpload": "Caricamento non valido",
|
||||||
"problemDeletingWorkflow": "Problema durante l'eliminazione del flusso di lavoro",
|
"problemDeletingWorkflow": "Problema durante l'eliminazione del flusso di lavoro",
|
||||||
"workflowDeleted": "Flusso di lavoro eliminato",
|
"workflowDeleted": "Flusso di lavoro eliminato",
|
||||||
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro"
|
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro",
|
||||||
|
"resetInitialImage": "Reimposta l'immagine iniziale",
|
||||||
|
"uploadInitialImage": "Carica l'immagine iniziale"
|
||||||
},
|
},
|
||||||
"tooltip": {
|
"tooltip": {
|
||||||
"feature": {
|
"feature": {
|
||||||
@ -899,7 +917,8 @@
|
|||||||
"loadMore": "Carica altro",
|
"loadMore": "Carica altro",
|
||||||
"mode": "Modalità",
|
"mode": "Modalità",
|
||||||
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
|
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
|
||||||
"createIssue": "Segnala un problema"
|
"createIssue": "Segnala un problema",
|
||||||
|
"about": "Informazioni"
|
||||||
},
|
},
|
||||||
"ui": {
|
"ui": {
|
||||||
"hideProgressImages": "Nascondi avanzamento immagini",
|
"hideProgressImages": "Nascondi avanzamento immagini",
|
||||||
@ -1232,7 +1251,11 @@
|
|||||||
"scribble": "Scarabocchio",
|
"scribble": "Scarabocchio",
|
||||||
"amult": "Angolo di illuminazione",
|
"amult": "Angolo di illuminazione",
|
||||||
"coarse": "Approssimativo",
|
"coarse": "Approssimativo",
|
||||||
"resizeSimple": "Ridimensiona (semplice)"
|
"resizeSimple": "Ridimensiona (semplice)",
|
||||||
|
"large": "Grande",
|
||||||
|
"small": "Piccolo",
|
||||||
|
"depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
|
||||||
|
"modelSize": "Dimensioni del modello"
|
||||||
},
|
},
|
||||||
"queue": {
|
"queue": {
|
||||||
"queueFront": "Aggiungi all'inizio della coda",
|
"queueFront": "Aggiungi all'inizio della coda",
|
||||||
@ -1664,7 +1687,9 @@
|
|||||||
"userWorkflows": "I miei flussi di lavoro",
|
"userWorkflows": "I miei flussi di lavoro",
|
||||||
"newWorkflowCreated": "Nuovo flusso di lavoro creato",
|
"newWorkflowCreated": "Nuovo flusso di lavoro creato",
|
||||||
"downloadWorkflow": "Salva su file",
|
"downloadWorkflow": "Salva su file",
|
||||||
"uploadWorkflow": "Carica da file"
|
"uploadWorkflow": "Carica da file",
|
||||||
|
"projectWorkflows": "Flussi di lavoro del progetto",
|
||||||
|
"noWorkflows": "Nessun flusso di lavoro"
|
||||||
},
|
},
|
||||||
"app": {
|
"app": {
|
||||||
"storeNotInitialized": "Il negozio non è inizializzato"
|
"storeNotInitialized": "Il negozio non è inizializzato"
|
||||||
|
@ -1,30 +1,36 @@
|
|||||||
{
|
{
|
||||||
"accessibility": {
|
"accessibility": {
|
||||||
"invokeProgressBar": "Invoke ilerleme durumu",
|
"invokeProgressBar": "Invoke durum çubuğu",
|
||||||
"nextImage": "Sonraki Resim",
|
"nextImage": "Sonraki Görsel",
|
||||||
"useThisParameter": "Kullanıcı parametreleri",
|
"useThisParameter": "Bu ayarları kullan",
|
||||||
"copyMetadataJson": "Metadata verilerini kopyala (JSON)",
|
"copyMetadataJson": "Üstveriyi kopyala (JSON)",
|
||||||
"exitViewer": "Görüntüleme Modundan Çık",
|
"exitViewer": "Görüntüleyiciden Çık",
|
||||||
"zoomIn": "Yakınlaştır",
|
"zoomIn": "Yakınlaştır",
|
||||||
"zoomOut": "Uzaklaştır",
|
"zoomOut": "Uzaklaştır",
|
||||||
"rotateCounterClockwise": "Döndür (Saat yönünün tersine)",
|
"rotateCounterClockwise": "Saat yönünün tersine döndür",
|
||||||
"rotateClockwise": "Döndür (Saat yönünde)",
|
"rotateClockwise": "Saat yönüne döndür",
|
||||||
"flipHorizontally": "Yatay Çevir",
|
"flipHorizontally": "Yatay Çevir",
|
||||||
"flipVertically": "Dikey Çevir",
|
"flipVertically": "Dikey Çevir",
|
||||||
"modifyConfig": "Ayarları Değiştir",
|
"modifyConfig": "Ayarları Değiştir",
|
||||||
"toggleAutoscroll": "Otomatik kaydırmayı aç/kapat",
|
"toggleAutoscroll": "Otomatik kaydırmayı Aç-Kapat",
|
||||||
"toggleLogViewer": "Günlük Görüntüleyici Aç/Kapa",
|
"toggleLogViewer": "Günlüğü Aç-Kapat",
|
||||||
"showOptionsPanel": "Ayarlar Panelini Göster",
|
"showOptionsPanel": "Yan Paneli Göster",
|
||||||
"modelSelect": "Model Seçin",
|
"modelSelect": "Model Seçimi",
|
||||||
"reset": "Sıfırla",
|
"reset": "Resetle",
|
||||||
"uploadImage": "Resim Yükle",
|
"uploadImage": "Görsel Yükle",
|
||||||
"previousImage": "Önceki Resim",
|
"previousImage": "Önceki Görsel",
|
||||||
"menu": "Menü"
|
"menu": "Menü",
|
||||||
|
"about": "Hakkında",
|
||||||
|
"mode": "Kip",
|
||||||
|
"resetUI": "$t(accessibility.reset)Arayüz",
|
||||||
|
"showGalleryPanel": "Galeri Panelini Göster",
|
||||||
|
"loadMore": "Daha Getir",
|
||||||
|
"createIssue": "Sorun Bildir"
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"hotkeysLabel": "Kısayol Tuşları",
|
"hotkeysLabel": "Kısayol Tuşları",
|
||||||
"languagePickerLabel": "Dil Seçimi",
|
"languagePickerLabel": "Dil",
|
||||||
"reportBugLabel": "Hata Bildir",
|
"reportBugLabel": "Sorun Bildir",
|
||||||
"githubLabel": "Github",
|
"githubLabel": "Github",
|
||||||
"discordLabel": "Discord",
|
"discordLabel": "Discord",
|
||||||
"settingsLabel": "Ayarlar",
|
"settingsLabel": "Ayarlar",
|
||||||
@ -37,22 +43,636 @@
|
|||||||
"langJapanese": "Japonca",
|
"langJapanese": "Japonca",
|
||||||
"langPolish": "Lehçe",
|
"langPolish": "Lehçe",
|
||||||
"langPortuguese": "Portekizce",
|
"langPortuguese": "Portekizce",
|
||||||
"langBrPortuguese": "Portekizcr (Brezilya)",
|
"langBrPortuguese": "Portekizce (Brezilya)",
|
||||||
"langRussian": "Rusça",
|
"langRussian": "Rusça",
|
||||||
"langSimplifiedChinese": "Çince (Basit)",
|
"langSimplifiedChinese": "Çince (Basit)",
|
||||||
"langUkranian": "Ukraynaca",
|
"langUkranian": "Ukraynaca",
|
||||||
"langSpanish": "İspanyolca",
|
"langSpanish": "İspanyolca",
|
||||||
"txt2img": "Metinden Resime",
|
"txt2img": "Yazıdan Görsel",
|
||||||
"img2img": "Resimden Metine",
|
"img2img": "Görselden Görsel",
|
||||||
"linear": "Çizgisel",
|
"linear": "Doğrusal",
|
||||||
"nodes": "Düğümler",
|
"nodes": "İş Akışı Düzenleyici",
|
||||||
"postprocessing": "İşlem Sonrası",
|
"postprocessing": "Rötuş",
|
||||||
"postProcessing": "İşlem Sonrası",
|
"postProcessing": "Rötuş",
|
||||||
"postProcessDesc2": "Daha gelişmiş özellikler için ve iş akışını kolaylaştırmak için özel bir kullanıcı arayüzü çok yakında yayınlanacaktır.",
|
"postProcessDesc2": "Daha gelişmiş iş akışlarına olanak sağlayacak özel bir arayüz yakında yayınlanacaktır.",
|
||||||
"postProcessDesc3": "Invoke AI komut satırı arayüzü, bir çok yeni özellik sunmaktadır.",
|
"postProcessDesc3": "Invoke AI Komut Satırı Arayüzü, içlerinde Embiggen da bulunan birçok özellik sunmaktadır.",
|
||||||
"langKorean": "Korece",
|
"langKorean": "Korece",
|
||||||
"unifiedCanvas": "Akıllı Tuval",
|
"unifiedCanvas": "Tuval",
|
||||||
"nodesDesc": "Görüntülerin oluşturulmasında hazırladığımız yeni bir sistem geliştirme aşamasındadır. Bu harika özellikler ve çok daha fazlası için bizi takip etmeye devam edin.",
|
"nodesDesc": "Görsel oluşturmaya yardımcı çizge tabanlı sistem şimdilik geliştirme aşamasındadır. Bu süper özellik hakkındaki gelişmeler için kulağınız bizde olsun.",
|
||||||
"postProcessDesc1": "Invoke AI son kullanıcıya yönelik bir çok özellik sunar. Görüntü kalitesi yükseltme, yüz restorasyonu WebUI üzerinden kullanılabilir. Metinden resime ve resimden metne araçlarına gelişmiş seçenekler menüsünden ulaşabilirsiniz. İsterseniz mevcut görüntü ekranının üzerindeki veya görüntüleyicideki görüntüyü doğrudan düzenleyebilirsiniz."
|
"postProcessDesc1": "Invoke AI birçok rötuş (post-process) aracı sağlar. Görsel büyütme ve yüz iyileştirme WebUI üzerinden kullanıma uygun durumdadır. Bunlara Yazıdan Görsel ve Görselden Görsel sekmelerindeki Gelişmiş Ayarlar menüsünden ulaşabilirsiniz. Ayrıca var olan görseli üzerindeki düğmeler yardımıyla düzenleyebilirsiniz.",
|
||||||
|
"batch": "Toplu İş Yöneticisi",
|
||||||
|
"accept": "Onayla",
|
||||||
|
"cancel": "Vazgeç",
|
||||||
|
"advanced": "Gelişmiş",
|
||||||
|
"copyError": "$t(gallery.copy) Hata",
|
||||||
|
"on": "Açık",
|
||||||
|
"or": "ya da",
|
||||||
|
"aboutDesc": "Invoke'u iş için mi kullanıyorsunuz? Şuna bir göz atın:",
|
||||||
|
"advancedOptions": "Gelişmiş Ayarlar",
|
||||||
|
"ai": "yapay zeka",
|
||||||
|
"close": "Kapat",
|
||||||
|
"auto": "Otomatik",
|
||||||
|
"communityLabel": "Topluluk",
|
||||||
|
"back": "Geri",
|
||||||
|
"areYouSure": "Emin misiniz?",
|
||||||
|
"notInstalled": "$t(common.installed) Değil",
|
||||||
|
"openInNewTab": "Yeni Sekmede Aç",
|
||||||
|
"aboutHeading": "Yaratıcı Gücünüzün Sahibi Olun",
|
||||||
|
"lightMode": "Açık Tema",
|
||||||
|
"load": "Yükle",
|
||||||
|
"loading": "Yükleniyor",
|
||||||
|
"loadingInvokeAI": "Invoke AI Yükleniyor",
|
||||||
|
"localSystem": "Yerel Sistem",
|
||||||
|
"inpaint": "içboyama",
|
||||||
|
"modelManager": "Model Yöneticisi",
|
||||||
|
"orderBy": "Sırala",
|
||||||
|
"outpaint": "dışboyama",
|
||||||
|
"outputs": "Çıktılar",
|
||||||
|
"langHebrew": "İbranice",
|
||||||
|
"learnMore": "Bilgi Edin",
|
||||||
|
"nodeEditor": "Çizge Düzenleyici",
|
||||||
|
"save": "Kaydet",
|
||||||
|
"statusMergingModels": "Modeller Birleştiriliyor",
|
||||||
|
"statusGenerating": "Oluşturuluyor",
|
||||||
|
"statusGenerationComplete": "Oluşturma Bitti",
|
||||||
|
"statusGeneratingOutpainting": "Dışboyama Oluşturuluyor",
|
||||||
|
"statusLoadingModel": "Model Yükleniyor",
|
||||||
|
"random": "Rastgele",
|
||||||
|
"simple": "Basit",
|
||||||
|
"preferencesLabel": "Seçenekler",
|
||||||
|
"statusConnected": "Bağlandı",
|
||||||
|
"statusMergedModels": "Modeller Birleştirildi",
|
||||||
|
"statusModelChanged": "Model Değişti",
|
||||||
|
"statusModelConverted": "Model Dönüştürüldü",
|
||||||
|
"statusPreparing": "Hazırlanıyor",
|
||||||
|
"statusProcessing": "İşleniyor",
|
||||||
|
"statusProcessingCanceled": "İşlemden Vazgeçildi",
|
||||||
|
"statusRestoringFacesCodeFormer": "Yüzler İyileştiriliyor (CodeFormer)",
|
||||||
|
"statusRestoringFacesGFPGAN": "Yüzler İyileştiriliyor (GFPGAN)",
|
||||||
|
"template": "Şablon",
|
||||||
|
"saveAs": "Farklı Kaydet",
|
||||||
|
"statusProcessingComplete": "İşlem Bitti",
|
||||||
|
"statusSavingImage": "Görsel Kaydediliyor",
|
||||||
|
"somethingWentWrong": "Bir sorun oluştu",
|
||||||
|
"statusConvertingModel": "Model Dönüştürülüyor",
|
||||||
|
"statusDisconnected": "Bağlantı Kesildi",
|
||||||
|
"statusError": "Hata",
|
||||||
|
"statusGeneratingImageToImage": "Görselden Görsel Oluşturuluyor",
|
||||||
|
"statusGeneratingInpainting": "İçboyama Oluşturuluyor",
|
||||||
|
"statusRestoringFaces": "Yüzler İyileştiriliyor",
|
||||||
|
"statusUpscaling": "Büyütme",
|
||||||
|
"statusUpscalingESRGAN": "Büyütme (ESRGAN)",
|
||||||
|
"training": "Eğitim",
|
||||||
|
"statusGeneratingTextToImage": "Yazıdan Görsel Oluşturuluyor",
|
||||||
|
"imagePrompt": "Görsel İstemi",
|
||||||
|
"unknown": "Bilinmeyen",
|
||||||
|
"green": "Yeşil",
|
||||||
|
"red": "Kırmızı",
|
||||||
|
"blue": "Mavi",
|
||||||
|
"alpha": "Alfa",
|
||||||
|
"file": "Dosya",
|
||||||
|
"folder": "Klasör",
|
||||||
|
"format": "biçim",
|
||||||
|
"details": "Ayrıntılar",
|
||||||
|
"error": "Hata",
|
||||||
|
"generate": "Oluştur",
|
||||||
|
"free": "Serbest",
|
||||||
|
"imageFailedToLoad": "Görsel Yüklenemedi",
|
||||||
|
"safetensors": "Safetensors",
|
||||||
|
"upload": "Yükle",
|
||||||
|
"nextPage": "Sonraki Sayfa",
|
||||||
|
"prevPage": "Önceki Sayfa",
|
||||||
|
"dontAskMeAgain": "Bir daha sorma",
|
||||||
|
"delete": "Kaldır",
|
||||||
|
"direction": "Yön",
|
||||||
|
"darkMode": "Koyu Tema",
|
||||||
|
"unsaved": "Kaydedilmemiş",
|
||||||
|
"unknownError": "Bilinmeyen Hata",
|
||||||
|
"installed": "Yüklü",
|
||||||
|
"data": "Veri",
|
||||||
|
"input": "Giriş",
|
||||||
|
"copy": "Kopyala",
|
||||||
|
"created": "Yaratma",
|
||||||
|
"updated": "Güncelleme"
|
||||||
|
},
|
||||||
|
"accordions": {
|
||||||
|
"generation": {
|
||||||
|
"title": "Oluşturma",
|
||||||
|
"modelTab": "Model",
|
||||||
|
"conceptsTab": "Kavramlar"
|
||||||
|
},
|
||||||
|
"image": {
|
||||||
|
"title": "Görsel"
|
||||||
|
},
|
||||||
|
"advanced": {
|
||||||
|
"title": "Gelişmiş"
|
||||||
|
},
|
||||||
|
"compositing": {
|
||||||
|
"title": "Birleştirme",
|
||||||
|
"coherenceTab": "Uyum Geçişi",
|
||||||
|
"infillTab": "Doldurma"
|
||||||
|
},
|
||||||
|
"control": {
|
||||||
|
"ipTab": "Görsel İstemleri"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"boards": {
|
||||||
|
"autoAddBoard": "Panoya Otomatik Ekleme",
|
||||||
|
"cancel": "Vazgeç",
|
||||||
|
"clearSearch": "Aramayı Sil",
|
||||||
|
"deleteBoard": "Panoyu Sil",
|
||||||
|
"loading": "Yükleniyor...",
|
||||||
|
"myBoard": "Panom",
|
||||||
|
"selectBoard": "Bir Pano Seç",
|
||||||
|
"addBoard": "Pano Ekle",
|
||||||
|
"deleteBoardAndImages": "Panoyu ve Görselleri Sil",
|
||||||
|
"deleteBoardOnly": "Sadece Panoyu Sil",
|
||||||
|
"deletedBoardsCannotbeRestored": "Silinen panolar geri getirilemez",
|
||||||
|
"menuItemAutoAdd": "Bu panoya otomatik olarak ekle",
|
||||||
|
"move": "Taşı",
|
||||||
|
"movingImagesToBoard_one": "{{count}} görseli şu panoya taşı:",
|
||||||
|
"movingImagesToBoard_other": "{{count}} görseli şu panoya taşı:",
|
||||||
|
"noMatching": "Eşleşen pano yok",
|
||||||
|
"searchBoard": "Pano Ara...",
|
||||||
|
"topMessage": "Bu pano, şuralarda kullanılan görseller içeriyor:",
|
||||||
|
"downloadBoard": "Panoyu İndir",
|
||||||
|
"uncategorized": "Kategorisiz",
|
||||||
|
"changeBoard": "Panoyu Değiştir",
|
||||||
|
"bottomMessage": "Bu panoyu ve görselleri silmek, bunları kullanan özelliklerin resetlemesine neden olacaktır."
|
||||||
|
},
|
||||||
|
"controlnet": {
|
||||||
|
"balanced": "Dengeli",
|
||||||
|
"contentShuffle": "İçerik Karıştırma",
|
||||||
|
"contentShuffleDescription": "Görselin içeriğini karıştırır",
|
||||||
|
"depthZoe": "Derinlik (Zoe)",
|
||||||
|
"depthZoeDescription": "Zoe kullanarak derinlik haritası oluşturma",
|
||||||
|
"resizeMode": "Boyutlandırma Kipi",
|
||||||
|
"addControlNet": "$t(common.controlNet) Ekle",
|
||||||
|
"addIPAdapter": "$t(common.ipAdapter) Ekle",
|
||||||
|
"addT2IAdapter": "$t(common.t2iAdapter) Ekle",
|
||||||
|
"controlNetEnabledT2IDisabled": "$t(common.controlNet) etkin, $t(common.t2iAdapter)s etkin değil",
|
||||||
|
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) etkin, $t(common.controlNet)s etkin değil",
|
||||||
|
"colorMap": "Renk",
|
||||||
|
"crop": "Kırpma",
|
||||||
|
"delete": "Kaldır",
|
||||||
|
"depthMidas": "Derinlik (Midas)",
|
||||||
|
"depthMidasDescription": "Midas kullanarak derinlik haritası oluşturma",
|
||||||
|
"detectResolution": "Çözünürlüğü Bul",
|
||||||
|
"none": "Hiçbiri",
|
||||||
|
"noneDescription": "Hiçbir işlem uygulanmamış",
|
||||||
|
"selectModel": "Model seçin",
|
||||||
|
"showAdvanced": "Gelişmiş Ayarları Göster",
|
||||||
|
"controlNetT2IMutexDesc": "$t(common.controlNet) ve $t(common.t2iAdapter)'nün birlikte kullanımı şimdilik desteklenmiyor.",
|
||||||
|
"canny": "Canny",
|
||||||
|
"colorMapDescription": "Görselden renk haritası oluşturur",
|
||||||
|
"handAndFace": "El ve Yüz",
|
||||||
|
"processor": "İşlemci",
|
||||||
|
"prompt": "İstem",
|
||||||
|
"duplicate": "Kopyala",
|
||||||
|
"large": "Büyük",
|
||||||
|
"modelSize": "Model Boyutu",
|
||||||
|
"resize": "Boyutlandır",
|
||||||
|
"resizeSimple": "Boyutlandır (Basit)",
|
||||||
|
"safe": "Güvenli",
|
||||||
|
"small": "Küçük",
|
||||||
|
"weight": "Etki",
|
||||||
|
"cannyDescription": "Canny kenar algılama",
|
||||||
|
"fill": "Doldur",
|
||||||
|
"highThreshold": "Üst Eşik",
|
||||||
|
"imageResolution": "Görsel Çözünürlüğü",
|
||||||
|
"colorMapTileSize": "Karo Boyutu",
|
||||||
|
"importImageFromCanvas": "Tuvaldeki Görseli Al",
|
||||||
|
"importMaskFromCanvas": "Tuvalden Maskeyi İçe Aktar",
|
||||||
|
"lowThreshold": "Alt Eşik",
|
||||||
|
"base": "Taban",
|
||||||
|
"depthAnythingDescription": "Depth Anything yöntemi ile derinlik haritası oluşturma"
|
||||||
|
},
|
||||||
|
"queue": {
|
||||||
|
"queuedCount": "{{pending}} Sırada",
|
||||||
|
"resumeSucceeded": "İşlem Sürdürüldü",
|
||||||
|
"openQueue": "Sırayı Göster",
|
||||||
|
"cancelSucceeded": "İş Geri Çekildi",
|
||||||
|
"cancelFailed": "İşi Geri Çekmede Sorun",
|
||||||
|
"prune": "Arındır",
|
||||||
|
"pruneTooltip": "{{item_count}} Bitmiş İşi Sil",
|
||||||
|
"resumeFailed": "İşlemi Sürdürmede Sorun",
|
||||||
|
"pauseFailed": "İşlemi Duraklatmada Sorun",
|
||||||
|
"cancelBatchSucceeded": "Toplu İşten Vazgeçildi",
|
||||||
|
"pruneSucceeded": "{{item_count}} Bitmiş İş Sıradan Silindi",
|
||||||
|
"in_progress": "İşleniyor",
|
||||||
|
"completed": "Bitti",
|
||||||
|
"canceled": "Vazgeçildi",
|
||||||
|
"back": "arka",
|
||||||
|
"queueFront": "Sıranın Başına Ekle",
|
||||||
|
"queueBack": "Sıraya Ekle",
|
||||||
|
"resumeTooltip": "İşlemi Sürdür",
|
||||||
|
"clearQueueAlertDialog2": "Sırayı boşaltmak istediğinizden emin misiniz?",
|
||||||
|
"batchQueuedDesc_one": "{{count}} iş sıranın {{direction}} eklendi",
|
||||||
|
"batchQueuedDesc_other": "{{count}} iş sıranın {{direction}} eklendi",
|
||||||
|
"batchFailedToQueue": "Toplu İş Sıraya Alınamadı",
|
||||||
|
"front": "ön",
|
||||||
|
"queue": "Sıra",
|
||||||
|
"resume": "Sürdür",
|
||||||
|
"queueTotal": "Toplam {{total}}",
|
||||||
|
"queueEmpty": "Sıra Boş",
|
||||||
|
"clearQueueAlertDialog": "Sırayı boşaltma düğmesi geçerli işlemi durdurur ve sırayı boşaltır.",
|
||||||
|
"current": "Şimdiki",
|
||||||
|
"time": "Süre",
|
||||||
|
"pause": "Duraklat",
|
||||||
|
"pauseTooltip": "İşlemi Duraklat",
|
||||||
|
"pruneFailed": "Sırayı Arındırmada Sorun",
|
||||||
|
"clearTooltip": "Vazgeç ve Tüm İşleri Sil",
|
||||||
|
"clear": "Boşalt",
|
||||||
|
"cancelBatchFailed": "Toplu İşten Vazgeçmede Sorun",
|
||||||
|
"next": "Sonraki",
|
||||||
|
"status": "Durum",
|
||||||
|
"failed": "Başarısız",
|
||||||
|
"item": "İş",
|
||||||
|
"enqueueing": "Toplu İş Sıraya Alınıyor",
|
||||||
|
"pauseSucceeded": "İşlem Duraklatıldı",
|
||||||
|
"cancel": "Vazgeç",
|
||||||
|
"cancelTooltip": "Bu İşi Geri Çek",
|
||||||
|
"clearSucceeded": "Sıra Boşaltıldı",
|
||||||
|
"clearFailed": "Sırayı Boşaltmada Sorun",
|
||||||
|
"cancelBatch": "Toplu İşten Vazgeç",
|
||||||
|
"cancelItem": "İşi Geri Çek",
|
||||||
|
"total": "Toplam",
|
||||||
|
"pending": "Sırada",
|
||||||
|
"completedIn": "'de bitirildi",
|
||||||
|
"batch": "Toplu İş",
|
||||||
|
"session": "Oturum",
|
||||||
|
"batchQueued": "Toplu İş Sıraya Alındı",
|
||||||
|
"notReady": "Sıraya Alınamadı",
|
||||||
|
"batchFieldValues": "Toplu İş Değişkenleri",
|
||||||
|
"queueMaxExceeded": "Sıra sınırı {{max_queue_size}} aşıldı, {{skip}} atlanıyor"
|
||||||
|
},
|
||||||
|
"invocationCache": {
|
||||||
|
"cacheSize": "Önbellek Boyutu",
|
||||||
|
"disable": "Kapat",
|
||||||
|
"clear": "Boşalt",
|
||||||
|
"maxCacheSize": "Maksimum Önbellek Boyutu",
|
||||||
|
"useCache": "Önbellek Kullan",
|
||||||
|
"enable": "Aç"
|
||||||
|
},
|
||||||
|
"gallery": {
|
||||||
|
"deleteImageBin": "Silinen görseller işletim sisteminin çöp kutusuna gönderilir.",
|
||||||
|
"deleteImagePermanent": "Silinen görseller geri getirilemez.",
|
||||||
|
"assets": "Özkaynaklar",
|
||||||
|
"autoAssignBoardOnClick": "Tıklanan Panoya Otomatik Atama",
|
||||||
|
"loading": "Yükleniyor",
|
||||||
|
"starImage": "Yıldız Koy",
|
||||||
|
"download": "İndir",
|
||||||
|
"deleteSelection": "Seçileni Sil",
|
||||||
|
"preparingDownloadFailed": "İndirme Hazırlanırken Sorun",
|
||||||
|
"problemDeletingImages": "Görsel Silmede Sorun",
|
||||||
|
"featuresWillReset": "Bu görseli silerseniz, o özellikler resetlenecektir.",
|
||||||
|
"galleryImageResetSize": "Boyutu Resetle",
|
||||||
|
"noImageSelected": "Görsel Seçilmedi",
|
||||||
|
"unstarImage": "Yıldızı Kaldır",
|
||||||
|
"uploads": "Yüklemeler",
|
||||||
|
"problemDeletingImagesDesc": "Bir ya da daha çok görsel silinemedi",
|
||||||
|
"gallerySettings": "Galeri Ayarları",
|
||||||
|
"image": "görsel",
|
||||||
|
"galleryImageSize": "Görsel Boyutu",
|
||||||
|
"allImagesLoaded": "Tüm Görseller Yüklendi",
|
||||||
|
"copy": "Kopyala",
|
||||||
|
"noImagesInGallery": "Gösterilecek Görsel Yok",
|
||||||
|
"autoSwitchNewImages": "Yeni Görseli Biter Bitmez Gör",
|
||||||
|
"maintainAspectRatio": "En-Boy Oranını Koru",
|
||||||
|
"currentlyInUse": "Bu görsel şurada kullanımda:",
|
||||||
|
"deleteImage": "Görseli Sil",
|
||||||
|
"loadMore": "Daha Getir",
|
||||||
|
"setCurrentImage": "Çalışma Görseli Yap",
|
||||||
|
"unableToLoad": "Galeri Yüklenemedi",
|
||||||
|
"downloadSelection": "Seçileni İndir",
|
||||||
|
"preparingDownload": "İndirmeye Hazırlanıyor",
|
||||||
|
"singleColumnLayout": "Tek Sütun Düzen",
|
||||||
|
"generations": "Çıktılar",
|
||||||
|
"showUploads": "Yüklenenleri Göster",
|
||||||
|
"showGenerations": "Çıktıları Göster"
|
||||||
|
},
|
||||||
|
"hrf": {
|
||||||
|
"hrf": "Yüksek Çözünürlük Kürü",
|
||||||
|
"enableHrf": "Yüksek Çözünürlük Kürünü Aç",
|
||||||
|
"hrfStrength": "Yüksek Çözünürlük Kürü Etkisi",
|
||||||
|
"strengthTooltip": "Düşük değerler daha az detaya neden olsa da olası bozuklukları önleyebilir.",
|
||||||
|
"metadata": {
|
||||||
|
"enabled": "Yüksek Çözünürlük Kürü Açık",
|
||||||
|
"strength": "Yüksek Çözünürlük Kürü Etkisi",
|
||||||
|
"method": "Yüksek Çözünürlük Kürü Yöntemi"
|
||||||
|
},
|
||||||
|
"upscaleMethod": "Büyütme Yöntemi",
|
||||||
|
"enableHrfTooltip": "Daha düşük bir çözünürlükle oluşturmaya başlar, ana çözünürlüğe büyütür ve Görselden Görsel'i çalıştırır."
|
||||||
|
},
|
||||||
|
"hotkeys": {
|
||||||
|
"noHotkeysFound": "Kısayol Tuşu Bulanamadı",
|
||||||
|
"searchHotkeys": "Kısayol Tuşlarında Ara",
|
||||||
|
"clearSearch": "Aramayı Sil",
|
||||||
|
"colorPicker": {
|
||||||
|
"title": "Renk Seçici",
|
||||||
|
"desc": "Tuvalde renk seçiciye geçer"
|
||||||
|
},
|
||||||
|
"consoleToggle": {
|
||||||
|
"title": "Konsolu Aç-Kapat",
|
||||||
|
"desc": "Konsolu aç-kapat"
|
||||||
|
},
|
||||||
|
"hideMask": {
|
||||||
|
"desc": "Maskeyi gizle-göster",
|
||||||
|
"title": "Maskeyi Gizle"
|
||||||
|
},
|
||||||
|
"focusPrompt": {
|
||||||
|
"title": "İsteme Odaklan",
|
||||||
|
"desc": "Görsel istemi alanına odaklanır"
|
||||||
|
},
|
||||||
|
"keyboardShortcuts": "Kısayol Tuşları",
|
||||||
|
"nextImage": {
|
||||||
|
"title": "Sonraki Görsel",
|
||||||
|
"desc": "Galerideki sonraki görseli göster"
|
||||||
|
},
|
||||||
|
"maximizeWorkSpace": {
|
||||||
|
"desc": "Panelleri kapat ve çalışma alanını genişlet",
|
||||||
|
"title": "Çalışma Alanını Genişlet"
|
||||||
|
},
|
||||||
|
"pinOptions": {
|
||||||
|
"desc": "Ayar panelini iğnele",
|
||||||
|
"title": "Ayarları İğnele"
|
||||||
|
},
|
||||||
|
"nodesHotkeys": "Çizgeler",
|
||||||
|
"quickToggleMove": {
|
||||||
|
"desc": "Geçici olarak Kayma Aracına geçer",
|
||||||
|
"title": "Geçici Kayma"
|
||||||
|
},
|
||||||
|
"showHideBoundingBox": {
|
||||||
|
"title": "Sınırlayıcı Kutuyu Gizle/Göster",
|
||||||
|
"desc": "Sınırlayıcı kutunun görünürlüğünü değiştir"
|
||||||
|
},
|
||||||
|
"showInfo": {
|
||||||
|
"desc": "Seçili görselin üstverisini göster",
|
||||||
|
"title": "Bilgileri Göster"
|
||||||
|
},
|
||||||
|
"nextStagingImage": {
|
||||||
|
"desc": "Sonraki Görsel Parçayı Göster",
|
||||||
|
"title": "Sonraki Görsel Parça"
|
||||||
|
},
|
||||||
|
"acceptStagingImage": {
|
||||||
|
"desc": "Geçiçi Görsel Parçasını Onayla",
|
||||||
|
"title": "Geçiçi Görsel Parçasını Onayla"
|
||||||
|
},
|
||||||
|
"changeTabs": {
|
||||||
|
"desc": "Çalışma alanını değiştir",
|
||||||
|
"title": "Sekmeyi değiştir"
|
||||||
|
},
|
||||||
|
"closePanels": {
|
||||||
|
"title": "Panelleri Kapat",
|
||||||
|
"desc": "Açık panelleri kapat"
|
||||||
|
},
|
||||||
|
"decreaseBrushOpacity": {
|
||||||
|
"title": "Fırça Saydamlığını Artır",
|
||||||
|
"desc": "Tuval fırçasının saydamlığını artırır"
|
||||||
|
},
|
||||||
|
"clearMask": {
|
||||||
|
"title": "Maskeyi Sil",
|
||||||
|
"desc": "Tüm maskeyi sil"
|
||||||
|
},
|
||||||
|
"decreaseGalleryThumbSize": {
|
||||||
|
"desc": "Galerideki küçük görsel boyutunu düşürür",
|
||||||
|
"title": "Küçük Görsel Boyutunu Düşür"
|
||||||
|
},
|
||||||
|
"deleteImage": {
|
||||||
|
"desc": "Seçili görseli sil",
|
||||||
|
"title": "Görseli Sil"
|
||||||
|
},
|
||||||
|
"invoke": {
|
||||||
|
"desc": "Görsel Oluştur",
|
||||||
|
"title": "Invoke"
|
||||||
|
},
|
||||||
|
"increaseGalleryThumbSize": {
|
||||||
|
"title": "Küçük Görsel Boyutunu Artır",
|
||||||
|
"desc": "Galerideki küçük görsel boyutunu artırır"
|
||||||
|
},
|
||||||
|
"setParameters": {
|
||||||
|
"title": "Değişkenleri Kullan",
|
||||||
|
"desc": "Seçili görselin tüm değişkenlerini kullan"
|
||||||
|
},
|
||||||
|
"setPrompt": {
|
||||||
|
"desc": "Seçili görselin istemini kullan",
|
||||||
|
"title": "İstemi Kullan"
|
||||||
|
},
|
||||||
|
"toggleLayer": {
|
||||||
|
"desc": "Maske/Taban katmanları arasında geçiş yapar",
|
||||||
|
"title": "Katmanı Gizle-Göster"
|
||||||
|
},
|
||||||
|
"upscale": {
|
||||||
|
"title": "Büyüt",
|
||||||
|
"desc": "Seçili görseli büyüt"
|
||||||
|
},
|
||||||
|
"setSeed": {
|
||||||
|
"title": "Tohumu Kullan",
|
||||||
|
"desc": "Seçili görselin tohumunu kullan"
|
||||||
|
},
|
||||||
|
"appHotkeys": "Uygulama",
|
||||||
|
"cancel": {
|
||||||
|
"desc": "Geçerli İşi Sil",
|
||||||
|
"title": "Vazgeç"
|
||||||
|
},
|
||||||
|
"sendToImageToImage": {
|
||||||
|
"title": "Görselden Görsel'e Gönder",
|
||||||
|
"desc": "Seçili görseli Görselden Görsel'e gönder"
|
||||||
|
},
|
||||||
|
"fillBoundingBox": {
|
||||||
|
"title": "Sınırlayıcı Kutuyu Doldur",
|
||||||
|
"desc": "Sınırlayıcı kutuyu fırçadaki renkle doldurur"
|
||||||
|
},
|
||||||
|
"moveTool": {
|
||||||
|
"desc": "Tuvalde kaymayı sağlar",
|
||||||
|
"title": "Kayma Aracı"
|
||||||
|
},
|
||||||
|
"redoStroke": {
|
||||||
|
"desc": "Fırça vuruşunu yinele",
|
||||||
|
"title": "Vuruşu Yinele"
|
||||||
|
},
|
||||||
|
"increaseBrushOpacity": {
|
||||||
|
"title": "Fırçanın Saydamlığını Düşür",
|
||||||
|
"desc": "Tuval fırçasının saydamlığını düşürür"
|
||||||
|
},
|
||||||
|
"selectEraser": {
|
||||||
|
"desc": "Tuval silgisini kullan",
|
||||||
|
"title": "Silgiyi Kullan"
|
||||||
|
},
|
||||||
|
"toggleOptions": {
|
||||||
|
"desc": "Ayarlar panelini aç-kapat",
|
||||||
|
"title": "Ayarları Aç-Kapat"
|
||||||
|
},
|
||||||
|
"copyToClipboard": {
|
||||||
|
"desc": "Tuval içeriğini kopyala",
|
||||||
|
"title": "Kopyala"
|
||||||
|
},
|
||||||
|
"galleryHotkeys": "Galeri",
|
||||||
|
"generalHotkeys": "Genel",
|
||||||
|
"mergeVisible": {
|
||||||
|
"desc": "Tuvalin görünür tüm katmanlarını birleştir",
|
||||||
|
"title": "Katmanları Birleştir"
|
||||||
|
},
|
||||||
|
"toggleGallery": {
|
||||||
|
"title": "Galeriyi Aç-Kapat",
|
||||||
|
"desc": "Galeri panelini aç-kapat"
|
||||||
|
},
|
||||||
|
"downloadImage": {
|
||||||
|
"title": "Görseli İndir",
|
||||||
|
"desc": "Tuval içeriğini indir"
|
||||||
|
},
|
||||||
|
"previousStagingImage": {
|
||||||
|
"title": "Önceki Görsel Parça",
|
||||||
|
"desc": "Önceki Görsel Parçayı Göster"
|
||||||
|
},
|
||||||
|
"increaseBrushSize": {
|
||||||
|
"title": "Fırça Boyutunu Artır",
|
||||||
|
"desc": "Tuval fırçasının/silgisinin boyutunu artırır"
|
||||||
|
},
|
||||||
|
"previousImage": {
|
||||||
|
"desc": "Galerideki önceki görseli göster",
|
||||||
|
"title": "Önceki Görsel"
|
||||||
|
},
|
||||||
|
"toggleOptionsAndGallery": {
|
||||||
|
"title": "Ayarları ve Galeriyi Aç-Kapat",
|
||||||
|
"desc": "Ayarlar ve galeri panellerini aç-kapat"
|
||||||
|
},
|
||||||
|
"toggleSnap": {
|
||||||
|
"desc": "Kılavuza Uydur",
|
||||||
|
"title": "Kılavuza Uydur"
|
||||||
|
},
|
||||||
|
"resetView": {
|
||||||
|
"desc": "Tuval Görüşünü Resetle",
|
||||||
|
"title": "Görüşü Resetle"
|
||||||
|
},
|
||||||
|
"cancelAndClear": {
|
||||||
|
"desc": "Geçerli işi geri çek ve sıradaki tüm işleri sil",
|
||||||
|
"title": "Vazgeç ve Sil"
|
||||||
|
},
|
||||||
|
"decreaseBrushSize": {
|
||||||
|
"title": "Fırça Boyutunu Düşür",
|
||||||
|
"desc": "Tuval fırçasının/silgisinin boyutunu düşürür"
|
||||||
|
},
|
||||||
|
"resetOptionsAndGallery": {
|
||||||
|
"desc": "Ayarlar ve galeri panellerini resetler",
|
||||||
|
"title": "Ayarları ve Galeriyi Resetle"
|
||||||
|
},
|
||||||
|
"remixImage": {
|
||||||
|
"desc": "Seçili görselin tohumu hariç tüm değişkenlerini kullan",
|
||||||
|
"title": "Benzerini Türet"
|
||||||
|
},
|
||||||
|
"undoStroke": {
|
||||||
|
"title": "Vuruşu Geri Al",
|
||||||
|
"desc": "Fırça vuruşunu geri al"
|
||||||
|
},
|
||||||
|
"saveToGallery": {
|
||||||
|
"title": "Galeriye Gönder",
|
||||||
|
"desc": "Tuval içeriğini galeriye gönder"
|
||||||
|
},
|
||||||
|
"unifiedCanvasHotkeys": "Tuval",
|
||||||
|
"addNodes": {
|
||||||
|
"desc": "Çizge ekleme menüsünü açar",
|
||||||
|
"title": "Çizge Ekle"
|
||||||
|
},
|
||||||
|
"eraseBoundingBox": {
|
||||||
|
"desc": "Sınırlayıcı kutunun içini boşaltır",
|
||||||
|
"title": "Sınırlayıcı Kutuyu Boşalt"
|
||||||
|
},
|
||||||
|
"selectBrush": {
|
||||||
|
"desc": "Tuval fırçasını kullan",
|
||||||
|
"title": "Fırçayı Kullan"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"embedding": {
|
||||||
|
"incompatibleModel": "Uyumsuz ana model:"
|
||||||
|
},
|
||||||
|
"unifiedCanvas": {
|
||||||
|
"accept": "Onayla",
|
||||||
|
"emptyTempImagesFolderMessage": "Geçici görsel klasörünü boşaltmak Tuvali resetler. Yineleme ve geri alma geçmişi, görsel parçası bölümü ve tuval taban katmanı da dolayısıla resetlenir.",
|
||||||
|
"clearCanvasHistoryMessage": "Tuval geçmişini silmek tuvale dokunmaz, ancak yineleme ve geri alma geçmişini geri dönülemez bir biçimde siler."
|
||||||
|
},
|
||||||
|
"nodes": {
|
||||||
|
"unableToValidateWorkflow": "İş Akışı Doğrulanamadı",
|
||||||
|
"workflowContact": "İletişim",
|
||||||
|
"loadWorkflow": "İş Akışı Yükle",
|
||||||
|
"workflowNotes": "Notlar",
|
||||||
|
"workflow": "İş Akışı",
|
||||||
|
"notesDescription": "İş akışınız hakkında not düşün",
|
||||||
|
"workflowTags": "Etiketler",
|
||||||
|
"workflowDescription": "Kısa Tanım",
|
||||||
|
"workflowValidation": "İş Akışı Doğrulama Sorunu",
|
||||||
|
"workflowVersion": "Sürüm",
|
||||||
|
"newWorkflow": "Yeni İş Akışı",
|
||||||
|
"currentImageDescription": "İşlemdeki görseli Çizge Düzenleyicide gösterir",
|
||||||
|
"workflowAuthor": "Yaratıcı",
|
||||||
|
"workflowName": "Ad",
|
||||||
|
"workflowSettings": "İş Akışı Düzenleyici Ayarları",
|
||||||
|
"currentImage": "İşlemdeki Görsel",
|
||||||
|
"noWorkflow": "İş Akışı Yok",
|
||||||
|
"newWorkflowDesc": "Yeni iş akışı?",
|
||||||
|
"problemReadingWorkflow": "Görselden iş akışı çağrılamadı",
|
||||||
|
"downloadWorkflow": "İş Akışını İndir (JSON)",
|
||||||
|
"unableToMigrateWorkflow": "İş Akışı Aktarılamadı",
|
||||||
|
"unknownErrorValidatingWorkflow": "İş akışını doğrulamada bilinmeyen bir sorun",
|
||||||
|
"unableToGetWorkflowVersion": "İş akışı sürümüne ulaşılamadı",
|
||||||
|
"unrecognizedWorkflowVersion": "Tanınmayan iş akışı sürümü {{version}}",
|
||||||
|
"newWorkflowDesc2": "Geçerli iş akışında kaydedilmemiş değişiklikler var.",
|
||||||
|
"unableToLoadWorkflow": "İş Akışı Yüklenemedi"
|
||||||
|
},
|
||||||
|
"workflows": {
|
||||||
|
"searchWorkflows": "İş Akışlarında Ara",
|
||||||
|
"workflowName": "İş Akışı Adı",
|
||||||
|
"problemSavingWorkflow": "İş Akışını Kaydetmede Sorun",
|
||||||
|
"saveWorkflow": "İş Akışını Kaydet",
|
||||||
|
"uploadWorkflow": "Dosyadan Yükle",
|
||||||
|
"newWorkflowCreated": "Yeni İş Akışı Yaratıldı",
|
||||||
|
"problemLoading": "İş Akışlarını Yüklemede Sorun",
|
||||||
|
"loading": "İş Akışları Yükleniyor",
|
||||||
|
"noDescription": "Tanımsız",
|
||||||
|
"workflowIsOpen": "İş Akışı Açık",
|
||||||
|
"clearWorkflowSearchFilter": "İş Akışı Aramasını Resetle",
|
||||||
|
"workflowEditorMenu": "İş Akışı Düzenleyici Menüsü",
|
||||||
|
"downloadWorkflow": "İndir",
|
||||||
|
"saveWorkflowAs": "İş Akışını Farklı Kaydet",
|
||||||
|
"savingWorkflow": "İş Akışı Kaydediliyor...",
|
||||||
|
"userWorkflows": "İş Akışlarım",
|
||||||
|
"defaultWorkflows": "Varsayılan İş Akışları",
|
||||||
|
"workflows": "İş Akışları",
|
||||||
|
"workflowLibrary": "Depo",
|
||||||
|
"deleteWorkflow": "İş Akışını Sil",
|
||||||
|
"unnamedWorkflow": "Adsız İş Akışı",
|
||||||
|
"noWorkflows": "İş Akışı Yok",
|
||||||
|
"workflowSaved": "İş Akışı Kaydedildi"
|
||||||
|
},
|
||||||
|
"toast": {
|
||||||
|
"problemDownloadingCanvasDesc": "Taban katman indirilemedi",
|
||||||
|
"problemSavingMaskDesc": "Maske kaydedilemedi",
|
||||||
|
"problemSavingCanvasDesc": "Taban katman kaydedilemedi",
|
||||||
|
"problemRetrievingWorkflow": "İş Akışını Getirmede Sorun",
|
||||||
|
"workflowDeleted": "İş Akışı Silindi",
|
||||||
|
"loadedWithWarnings": "İş Akışı Yüklendi Ancak Uyarılar Var",
|
||||||
|
"problemImportingMaskDesc": "Maske aktarılamadı",
|
||||||
|
"problemMergingCanvasDesc": "Taban katman aktarılamadı",
|
||||||
|
"problemCopyingCanvasDesc": "Taban katman aktarılamadı",
|
||||||
|
"workflowLoaded": "İş Akışı Yüklendi",
|
||||||
|
"problemDeletingWorkflow": "İş Akışını Silmede Sorun"
|
||||||
|
},
|
||||||
|
"parameters": {
|
||||||
|
"invoke": {
|
||||||
|
"noPrompts": "İstem oluşturulmadı"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"modelManager": {
|
||||||
|
"baseModel": "Ana Model"
|
||||||
|
},
|
||||||
|
"dynamicPrompts": {
|
||||||
|
"loading": "Devimsel İstemler Oluşturuluyor...",
|
||||||
|
"combinatorial": "Birleşimsel Oluşturma"
|
||||||
|
},
|
||||||
|
"models": {
|
||||||
|
"incompatibleBaseModel": "Uyumsuz ana model"
|
||||||
|
},
|
||||||
|
"settings": {
|
||||||
|
"generation": "Oluşturma"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -6,9 +6,7 @@ const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json';
|
|||||||
const OUTPUT_FILE = 'src/services/api/schema.ts';
|
const OUTPUT_FILE = 'src/services/api/schema.ts';
|
||||||
|
|
||||||
async function main() {
|
async function main() {
|
||||||
process.stdout.write(
|
process.stdout.write(`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...`);
|
||||||
`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...`
|
|
||||||
);
|
|
||||||
const types = await openapiTS(OPENAPI_URL, {
|
const types = await openapiTS(OPENAPI_URL, {
|
||||||
exportType: true,
|
exportType: true,
|
||||||
transform: (schemaObject) => {
|
transform: (schemaObject) => {
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
import { Box, useGlobalModifiersInit } from '@invoke-ai/ui';
|
import { Box, useGlobalModifiersInit } from '@invoke-ai/ui-library';
|
||||||
import { useSocketIO } from 'app/hooks/useSocketIO';
|
import { useSocketIO } from 'app/hooks/useSocketIO';
|
||||||
import { useLogger } from 'app/logging/useLogger';
|
import { useLogger } from 'app/logging/useLogger';
|
||||||
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
||||||
@ -45,8 +45,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
|
|||||||
useGlobalModifiersInit();
|
useGlobalModifiersInit();
|
||||||
useGlobalHotkeys();
|
useGlobalHotkeys();
|
||||||
|
|
||||||
const { dropzone, isHandlingUpload, setIsHandlingUpload } =
|
const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
|
||||||
useFullscreenDropzone();
|
|
||||||
|
|
||||||
const handleReset = useCallback(() => {
|
const handleReset = useCallback(() => {
|
||||||
clearStorage();
|
clearStorage();
|
||||||
@ -70,10 +69,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
|
|||||||
}, [dispatch]);
|
}, [dispatch]);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<ErrorBoundary
|
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
|
||||||
onReset={handleReset}
|
|
||||||
FallbackComponent={AppErrorBoundaryFallback}
|
|
||||||
>
|
|
||||||
<Box
|
<Box
|
||||||
id="invoke-app-wrapper"
|
id="invoke-app-wrapper"
|
||||||
w="100vw"
|
w="100vw"
|
||||||
@ -86,10 +82,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
|
|||||||
<InvokeTabs />
|
<InvokeTabs />
|
||||||
<AnimatePresence>
|
<AnimatePresence>
|
||||||
{dropzone.isDragActive && isHandlingUpload && (
|
{dropzone.isDragActive && isHandlingUpload && (
|
||||||
<ImageUploadOverlay
|
<ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
|
||||||
dropzone={dropzone}
|
|
||||||
setIsHandlingUpload={setIsHandlingUpload}
|
|
||||||
/>
|
|
||||||
)}
|
)}
|
||||||
</AnimatePresence>
|
</AnimatePresence>
|
||||||
</Box>
|
</Box>
|
||||||
|
@ -1,12 +1,8 @@
|
|||||||
import { Button, Flex, Heading, Link, Text, useToast } from '@invoke-ai/ui';
|
import { Button, Flex, Heading, Link, Text, useToast } from '@invoke-ai/ui-library';
|
||||||
import newGithubIssueUrl from 'new-github-issue-url';
|
import newGithubIssueUrl from 'new-github-issue-url';
|
||||||
import { memo, useCallback, useMemo } from 'react';
|
import { memo, useCallback, useMemo } from 'react';
|
||||||
import { useTranslation } from 'react-i18next';
|
import { useTranslation } from 'react-i18next';
|
||||||
import {
|
import { PiArrowCounterClockwiseBold, PiArrowSquareOutBold, PiCopyBold } from 'react-icons/pi';
|
||||||
PiArrowCounterClockwiseBold,
|
|
||||||
PiArrowSquareOutBold,
|
|
||||||
PiCopyBold,
|
|
||||||
} from 'react-icons/pi';
|
|
||||||
import { serializeError } from 'serialize-error';
|
import { serializeError } from 'serialize-error';
|
||||||
|
|
||||||
type Props = {
|
type Props = {
|
||||||
@ -37,22 +33,8 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
|
|||||||
[error.message, error.name]
|
[error.message, error.name]
|
||||||
);
|
);
|
||||||
return (
|
return (
|
||||||
<Flex
|
<Flex layerStyle="body" w="100vw" h="100vh" alignItems="center" justifyContent="center" p={4}>
|
||||||
layerStyle="body"
|
<Flex layerStyle="first" flexDir="column" borderRadius="base" justifyContent="center" gap={8} p={16}>
|
||||||
w="100vw"
|
|
||||||
h="100vh"
|
|
||||||
alignItems="center"
|
|
||||||
justifyContent="center"
|
|
||||||
p={4}
|
|
||||||
>
|
|
||||||
<Flex
|
|
||||||
layerStyle="first"
|
|
||||||
flexDir="column"
|
|
||||||
borderRadius="base"
|
|
||||||
justifyContent="center"
|
|
||||||
gap={8}
|
|
||||||
p={16}
|
|
||||||
>
|
|
||||||
<Heading>{t('common.somethingWentWrong')}</Heading>
|
<Heading>{t('common.somethingWentWrong')}</Heading>
|
||||||
<Flex
|
<Flex
|
||||||
layerStyle="second"
|
layerStyle="second"
|
||||||
@ -68,19 +50,14 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
|
|||||||
</Text>
|
</Text>
|
||||||
</Flex>
|
</Flex>
|
||||||
<Flex gap={4}>
|
<Flex gap={4}>
|
||||||
<Button
|
<Button leftIcon={<PiArrowCounterClockwiseBold />} onClick={resetErrorBoundary}>
|
||||||
leftIcon={<PiArrowCounterClockwiseBold />}
|
|
||||||
onClick={resetErrorBoundary}
|
|
||||||
>
|
|
||||||
{t('accessibility.resetUI')}
|
{t('accessibility.resetUI')}
|
||||||
</Button>
|
</Button>
|
||||||
<Button leftIcon={<PiCopyBold />} onClick={handleCopy}>
|
<Button leftIcon={<PiCopyBold />} onClick={handleCopy}>
|
||||||
{t('common.copyError')}
|
{t('common.copyError')}
|
||||||
</Button>
|
</Button>
|
||||||
<Link href={url} isExternal>
|
<Link href={url} isExternal>
|
||||||
<Button leftIcon={<PiArrowSquareOutBold />}>
|
<Button leftIcon={<PiArrowSquareOutBold />}>{t('accessibility.createIssue')}</Button>
|
||||||
{t('accessibility.createIssue')}
|
|
||||||
</Button>
|
|
||||||
</Link>
|
</Link>
|
||||||
</Flex>
|
</Flex>
|
||||||
</Flex>
|
</Flex>
|
||||||
|
@ -10,13 +10,16 @@ import { $customStarUI } from 'app/store/nanostores/customStarUI';
|
|||||||
import { $galleryHeader } from 'app/store/nanostores/galleryHeader';
|
import { $galleryHeader } from 'app/store/nanostores/galleryHeader';
|
||||||
import { $isDebugging } from 'app/store/nanostores/isDebugging';
|
import { $isDebugging } from 'app/store/nanostores/isDebugging';
|
||||||
import { $logo } from 'app/store/nanostores/logo';
|
import { $logo } from 'app/store/nanostores/logo';
|
||||||
|
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
|
||||||
import { $projectId } from 'app/store/nanostores/projectId';
|
import { $projectId } from 'app/store/nanostores/projectId';
|
||||||
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
|
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
|
||||||
import { $store } from 'app/store/nanostores/store';
|
import { $store } from 'app/store/nanostores/store';
|
||||||
|
import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
|
||||||
import { createStore } from 'app/store/store';
|
import { createStore } from 'app/store/store';
|
||||||
import type { PartialAppConfig } from 'app/types/invokeai';
|
import type { PartialAppConfig } from 'app/types/invokeai';
|
||||||
import Loading from 'common/components/Loading/Loading';
|
import Loading from 'common/components/Loading/Loading';
|
||||||
import AppDndContext from 'features/dnd/components/AppDndContext';
|
import AppDndContext from 'features/dnd/components/AppDndContext';
|
||||||
|
import type { WorkflowCategory } from 'features/nodes/types/workflow';
|
||||||
import type { PropsWithChildren, ReactNode } from 'react';
|
import type { PropsWithChildren, ReactNode } from 'react';
|
||||||
import React, { lazy, memo, useEffect, useMemo } from 'react';
|
import React, { lazy, memo, useEffect, useMemo } from 'react';
|
||||||
import { Provider } from 'react-redux';
|
import { Provider } from 'react-redux';
|
||||||
@ -28,6 +31,7 @@ const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
|
|||||||
|
|
||||||
interface Props extends PropsWithChildren {
|
interface Props extends PropsWithChildren {
|
||||||
apiUrl?: string;
|
apiUrl?: string;
|
||||||
|
openAPISchemaUrl?: string;
|
||||||
token?: string;
|
token?: string;
|
||||||
config?: PartialAppConfig;
|
config?: PartialAppConfig;
|
||||||
customNavComponent?: ReactNode;
|
customNavComponent?: ReactNode;
|
||||||
@ -43,10 +47,12 @@ interface Props extends PropsWithChildren {
|
|||||||
socketOptions?: Partial<ManagerOptions & SocketOptions>;
|
socketOptions?: Partial<ManagerOptions & SocketOptions>;
|
||||||
isDebugging?: boolean;
|
isDebugging?: boolean;
|
||||||
logo?: ReactNode;
|
logo?: ReactNode;
|
||||||
|
workflowCategories?: WorkflowCategory[];
|
||||||
}
|
}
|
||||||
|
|
||||||
const InvokeAIUI = ({
|
const InvokeAIUI = ({
|
||||||
apiUrl,
|
apiUrl,
|
||||||
|
openAPISchemaUrl,
|
||||||
token,
|
token,
|
||||||
config,
|
config,
|
||||||
customNavComponent,
|
customNavComponent,
|
||||||
@ -59,6 +65,7 @@ const InvokeAIUI = ({
|
|||||||
socketOptions,
|
socketOptions,
|
||||||
isDebugging = false,
|
isDebugging = false,
|
||||||
logo,
|
logo,
|
||||||
|
workflowCategories,
|
||||||
}: Props) => {
|
}: Props) => {
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
// configure API client token
|
// configure API client token
|
||||||
@ -123,6 +130,16 @@ const InvokeAIUI = ({
|
|||||||
};
|
};
|
||||||
}, [customNavComponent]);
|
}, [customNavComponent]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (openAPISchemaUrl) {
|
||||||
|
$openAPISchemaUrl.set(openAPISchemaUrl);
|
||||||
|
}
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
$openAPISchemaUrl.set(undefined);
|
||||||
|
};
|
||||||
|
}, [openAPISchemaUrl]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (galleryHeader) {
|
if (galleryHeader) {
|
||||||
$galleryHeader.set(galleryHeader);
|
$galleryHeader.set(galleryHeader);
|
||||||
@ -143,6 +160,16 @@ const InvokeAIUI = ({
|
|||||||
};
|
};
|
||||||
}, [logo]);
|
}, [logo]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (workflowCategories) {
|
||||||
|
$workflowCategories.set(workflowCategories);
|
||||||
|
}
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
$workflowCategories.set([]);
|
||||||
|
};
|
||||||
|
}, [workflowCategories]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (socketOptions) {
|
if (socketOptions) {
|
||||||
$socketOptions.set(socketOptions);
|
$socketOptions.set(socketOptions);
|
||||||
|
@ -1,13 +1,7 @@
|
|||||||
import '@fontsource-variable/inter';
|
import '@fontsource-variable/inter';
|
||||||
import 'overlayscrollbars/overlayscrollbars.css';
|
import 'overlayscrollbars/overlayscrollbars.css';
|
||||||
|
|
||||||
import {
|
import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
|
||||||
ChakraProvider,
|
|
||||||
DarkMode,
|
|
||||||
extendTheme,
|
|
||||||
theme as _theme,
|
|
||||||
TOAST_OPTIONS,
|
|
||||||
} from '@invoke-ai/ui';
|
|
||||||
import type { ReactNode } from 'react';
|
import type { ReactNode } from 'react';
|
||||||
import { memo, useEffect, useMemo } from 'react';
|
import { memo, useEffect, useMemo } from 'react';
|
||||||
import { useTranslation } from 'react-i18next';
|
import { useTranslation } from 'react-i18next';
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
import { useToast } from '@invoke-ai/ui';
|
import { useToast } from '@invoke-ai/ui-library';
|
||||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||||
import { addToast, clearToastQueue } from 'features/system/store/systemSlice';
|
import { addToast, clearToastQueue } from 'features/system/store/systemSlice';
|
||||||
import type { MakeToastArg } from 'features/system/util/makeToast';
|
import type { MakeToastArg } from 'features/system/util/makeToast';
|
||||||
@ -36,10 +36,7 @@ const Toaster = () => {
|
|||||||
*/
|
*/
|
||||||
export const useAppToaster = () => {
|
export const useAppToaster = () => {
|
||||||
const dispatch = useAppDispatch();
|
const dispatch = useAppDispatch();
|
||||||
const toaster = useCallback(
|
const toaster = useCallback((arg: MakeToastArg) => dispatch(addToast(makeToast(arg))), [dispatch]);
|
||||||
(arg: MakeToastArg) => dispatch(addToast(makeToast(arg))),
|
|
||||||
[dispatch]
|
|
||||||
);
|
|
||||||
|
|
||||||
return toaster;
|
return toaster;
|
||||||
};
|
};
|
||||||
|
@ -6,10 +6,7 @@ import { useAppDispatch } from 'app/store/storeHooks';
|
|||||||
import type { MapStore } from 'nanostores';
|
import type { MapStore } from 'nanostores';
|
||||||
import { atom, map } from 'nanostores';
|
import { atom, map } from 'nanostores';
|
||||||
import { useEffect, useMemo } from 'react';
|
import { useEffect, useMemo } from 'react';
|
||||||
import type {
|
import type { ClientToServerEvents, ServerToClientEvents } from 'services/events/types';
|
||||||
ClientToServerEvents,
|
|
||||||
ServerToClientEvents,
|
|
||||||
} from 'services/events/types';
|
|
||||||
import { setEventListeners } from 'services/events/util/setEventListeners';
|
import { setEventListeners } from 'services/events/util/setEventListeners';
|
||||||
import type { ManagerOptions, Socket, SocketOptions } from 'socket.io-client';
|
import type { ManagerOptions, Socket, SocketOptions } from 'socket.io-client';
|
||||||
import { io } from 'socket.io-client';
|
import { io } from 'socket.io-client';
|
||||||
@ -45,7 +42,7 @@ export const useSocketIO = () => {
|
|||||||
const socketOptions = useMemo(() => {
|
const socketOptions = useMemo(() => {
|
||||||
const options: Partial<ManagerOptions & SocketOptions> = {
|
const options: Partial<ManagerOptions & SocketOptions> = {
|
||||||
timeout: 60000,
|
timeout: 60000,
|
||||||
path: '/ws/socket.io',
|
path: baseUrl ? '/ws/socket.io' : `${window.location.pathname}ws/socket.io`,
|
||||||
autoConnect: false, // achtung! removing this breaks the dynamic middleware
|
autoConnect: false, // achtung! removing this breaks the dynamic middleware
|
||||||
forceNew: true,
|
forceNew: true,
|
||||||
};
|
};
|
||||||
@ -56,7 +53,7 @@ export const useSocketIO = () => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return { ...options, ...addlSocketOptions };
|
return { ...options, ...addlSocketOptions };
|
||||||
}, [authToken, addlSocketOptions]);
|
}, [authToken, addlSocketOptions, baseUrl]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if ($isSocketInitialized.get()) {
|
if ($isSocketInitialized.get()) {
|
||||||
@ -64,10 +61,7 @@ export const useSocketIO = () => {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(
|
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(socketUrl, socketOptions);
|
||||||
socketUrl,
|
|
||||||
socketOptions
|
|
||||||
);
|
|
||||||
setEventListeners({ dispatch, socket });
|
setEventListeners({ dispatch, socket });
|
||||||
socket.connect();
|
socket.connect();
|
||||||
|
|
||||||
|
@ -30,20 +30,11 @@ export type LoggerNamespace =
|
|||||||
| 'queue'
|
| 'queue'
|
||||||
| 'dnd';
|
| 'dnd';
|
||||||
|
|
||||||
export const logger = (namespace: LoggerNamespace) =>
|
export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });
|
||||||
$logger.get().child({ namespace });
|
|
||||||
|
|
||||||
export const zLogLevel = z.enum([
|
export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']);
|
||||||
'trace',
|
|
||||||
'debug',
|
|
||||||
'info',
|
|
||||||
'warn',
|
|
||||||
'error',
|
|
||||||
'fatal',
|
|
||||||
]);
|
|
||||||
export type LogLevel = z.infer<typeof zLogLevel>;
|
export type LogLevel = z.infer<typeof zLogLevel>;
|
||||||
export const isLogLevel = (v: unknown): v is LogLevel =>
|
export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;
|
||||||
zLogLevel.safeParse(v).success;
|
|
||||||
|
|
||||||
// Translate human-readable log levels to numbers, used for log filtering
|
// Translate human-readable log levels to numbers, used for log filtering
|
||||||
export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
|
export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
|
||||||
|
@ -17,10 +17,7 @@ export const useLogger = (namespace: LoggerNamespace) => {
|
|||||||
localStorage.setItem('ROARR_LOG', 'true');
|
localStorage.setItem('ROARR_LOG', 'true');
|
||||||
|
|
||||||
// Use a filter to show only logs of the given level
|
// Use a filter to show only logs of the given level
|
||||||
localStorage.setItem(
|
localStorage.setItem('ROARR_FILTER', `context.logLevel:>=${LOG_LEVEL_MAP[consoleLogLevel]}`);
|
||||||
'ROARR_FILTER',
|
|
||||||
`context.logLevel:>=${LOG_LEVEL_MAP[consoleLogLevel]}`
|
|
||||||
);
|
|
||||||
} else {
|
} else {
|
||||||
// Disable console log output
|
// Disable console log output
|
||||||
localStorage.setItem('ROARR_LOG', 'false');
|
localStorage.setItem('ROARR_LOG', 'false');
|
||||||
|
@ -1,8 +1,4 @@
|
|||||||
import {
|
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
|
||||||
createDraftSafeSelectorCreator,
|
|
||||||
createSelectorCreator,
|
|
||||||
lruMemoize,
|
|
||||||
} from '@reduxjs/toolkit';
|
|
||||||
import type { GetSelectorsOptions } from '@reduxjs/toolkit/dist/entities/state_selectors';
|
import type { GetSelectorsOptions } from '@reduxjs/toolkit/dist/entities/state_selectors';
|
||||||
import { isEqual } from 'lodash-es';
|
import { isEqual } from 'lodash-es';
|
||||||
|
|
||||||
|
@ -1,19 +1,12 @@
|
|||||||
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
|
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
|
||||||
import { $projectId } from 'app/store/nanostores/projectId';
|
import { $projectId } from 'app/store/nanostores/projectId';
|
||||||
import type { UseStore } from 'idb-keyval';
|
import type { UseStore } from 'idb-keyval';
|
||||||
import {
|
import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
|
||||||
clear,
|
|
||||||
createStore as createIDBKeyValStore,
|
|
||||||
get,
|
|
||||||
set,
|
|
||||||
} from 'idb-keyval';
|
|
||||||
import { action, atom } from 'nanostores';
|
import { action, atom } from 'nanostores';
|
||||||
import type { Driver } from 'redux-remember';
|
import type { Driver } from 'redux-remember';
|
||||||
|
|
||||||
// Create a custom idb-keyval store (just needed to customize the name)
|
// Create a custom idb-keyval store (just needed to customize the name)
|
||||||
export const $idbKeyValStore = atom<UseStore>(
|
export const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
|
||||||
createIDBKeyValStore('invoke', 'invoke-store')
|
|
||||||
);
|
|
||||||
|
|
||||||
export const clearIdbKeyValStore = action($idbKeyValStore, 'clear', (store) => {
|
export const clearIdbKeyValStore = action($idbKeyValStore, 'clear', (store) => {
|
||||||
clear(store.get());
|
clear(store.get());
|
||||||
|
@ -4,13 +4,12 @@ import { diff } from 'jsondiffpatch';
|
|||||||
/**
|
/**
|
||||||
* Super simple logger middleware. Useful for debugging when the redux devtools are awkward.
|
* Super simple logger middleware. Useful for debugging when the redux devtools are awkward.
|
||||||
*/
|
*/
|
||||||
export const debugLoggerMiddleware: Middleware =
|
export const debugLoggerMiddleware: Middleware = (api: MiddlewareAPI) => (next) => (action) => {
|
||||||
(api: MiddlewareAPI) => (next) => (action) => {
|
const originalState = api.getState();
|
||||||
const originalState = api.getState();
|
console.log('REDUX: dispatching', action);
|
||||||
console.log('REDUX: dispatching', action);
|
const result = next(action);
|
||||||
const result = next(action);
|
const nextState = api.getState();
|
||||||
const nextState = api.getState();
|
console.log('REDUX: next state', nextState);
|
||||||
console.log('REDUX: next state', nextState);
|
console.log('REDUX: diff', diff(originalState, nextState));
|
||||||
console.log('REDUX: diff', diff(originalState, nextState));
|
return result;
|
||||||
return result;
|
};
|
||||||
};
|
|
||||||
|
@ -35,8 +35,7 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
|
|||||||
if (socketGeneratorProgress.match(action)) {
|
if (socketGeneratorProgress.match(action)) {
|
||||||
const sanitized = cloneDeep(action);
|
const sanitized = cloneDeep(action);
|
||||||
if (sanitized.payload.data.progress_image) {
|
if (sanitized.payload.data.progress_image) {
|
||||||
sanitized.payload.data.progress_image.dataURL =
|
sanitized.payload.data.progress_image.dataURL = '<Progress image omitted>';
|
||||||
'<Progress image omitted>';
|
|
||||||
}
|
}
|
||||||
return sanitized;
|
return sanitized;
|
||||||
}
|
}
|
||||||
|
@ -1,9 +1,4 @@
|
|||||||
import type {
|
import type { ListenerEffect, TypedAddListener, TypedStartListening, UnknownAction } from '@reduxjs/toolkit';
|
||||||
ListenerEffect,
|
|
||||||
TypedAddListener,
|
|
||||||
TypedStartListening,
|
|
||||||
UnknownAction,
|
|
||||||
} from '@reduxjs/toolkit';
|
|
||||||
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
|
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
|
||||||
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
|
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
|
||||||
import type { AppDispatch, RootState } from 'app/store/store';
|
import type { AppDispatch, RootState } from 'app/store/store';
|
||||||
@ -47,10 +42,7 @@ import {
|
|||||||
import { addImagesStarredListener } from './listeners/imagesStarred';
|
import { addImagesStarredListener } from './listeners/imagesStarred';
|
||||||
import { addImagesUnstarredListener } from './listeners/imagesUnstarred';
|
import { addImagesUnstarredListener } from './listeners/imagesUnstarred';
|
||||||
import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected';
|
import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected';
|
||||||
import {
|
import { addImageUploadedFulfilledListener, addImageUploadedRejectedListener } from './listeners/imageUploaded';
|
||||||
addImageUploadedFulfilledListener,
|
|
||||||
addImageUploadedRejectedListener,
|
|
||||||
} from './listeners/imageUploaded';
|
|
||||||
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
|
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
|
||||||
import { addModelSelectedListener } from './listeners/modelSelected';
|
import { addModelSelectedListener } from './listeners/modelSelected';
|
||||||
import { addModelsLoadedListener } from './listeners/modelsLoaded';
|
import { addModelsLoadedListener } from './listeners/modelsLoaded';
|
||||||
@ -78,19 +70,11 @@ export const listenerMiddleware = createListenerMiddleware();
|
|||||||
|
|
||||||
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
||||||
|
|
||||||
export const startAppListening =
|
export const startAppListening = listenerMiddleware.startListening as AppStartListening;
|
||||||
listenerMiddleware.startListening as AppStartListening;
|
|
||||||
|
|
||||||
export const addAppListener = addListener as TypedAddListener<
|
export const addAppListener = addListener as TypedAddListener<RootState, AppDispatch>;
|
||||||
RootState,
|
|
||||||
AppDispatch
|
|
||||||
>;
|
|
||||||
|
|
||||||
export type AppListenerEffect = ListenerEffect<
|
export type AppListenerEffect = ListenerEffect<UnknownAction, RootState, AppDispatch>;
|
||||||
UnknownAction,
|
|
||||||
RootState,
|
|
||||||
AppDispatch
|
|
||||||
>;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The RTK listener middleware is a lightweight alternative sagas/observables.
|
* The RTK listener middleware is a lightweight alternative sagas/observables.
|
||||||
|
@ -1,10 +1,6 @@
|
|||||||
import { isAnyOf } from '@reduxjs/toolkit';
|
import { isAnyOf } from '@reduxjs/toolkit';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import {
|
import { canvasBatchIdsReset, commitStagingAreaImage, discardStagedImages } from 'features/canvas/store/canvasSlice';
|
||||||
canvasBatchIdsReset,
|
|
||||||
commitStagingAreaImage,
|
|
||||||
discardStagedImages,
|
|
||||||
} from 'features/canvas/store/canvasSlice';
|
|
||||||
import { addToast } from 'features/system/store/systemSlice';
|
import { addToast } from 'features/system/store/systemSlice';
|
||||||
import { t } from 'i18next';
|
import { t } from 'i18next';
|
||||||
import { queueApi } from 'services/api/endpoints/queue';
|
import { queueApi } from 'services/api/endpoints/queue';
|
||||||
@ -23,10 +19,7 @@ export const addCommitStagingAreaImageListener = () => {
|
|||||||
|
|
||||||
try {
|
try {
|
||||||
const req = dispatch(
|
const req = dispatch(
|
||||||
queueApi.endpoints.cancelByBatchIds.initiate(
|
queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: batchIds }, { fixedCacheKey: 'cancelByBatchIds' })
|
||||||
{ batch_ids: batchIds },
|
|
||||||
{ fixedCacheKey: 'cancelByBatchIds' }
|
|
||||||
)
|
|
||||||
);
|
);
|
||||||
const { canceled } = await req.unwrap();
|
const { canceled } = await req.unwrap();
|
||||||
req.reset();
|
req.reset();
|
||||||
|
@ -12,15 +12,9 @@ export const appStarted = createAction('app/appStarted');
|
|||||||
export const addFirstListImagesListener = () => {
|
export const addFirstListImagesListener = () => {
|
||||||
startAppListening({
|
startAppListening({
|
||||||
matcher: imagesApi.endpoints.listImages.matchFulfilled,
|
matcher: imagesApi.endpoints.listImages.matchFulfilled,
|
||||||
effect: async (
|
effect: async (action, { dispatch, unsubscribe, cancelActiveListeners }) => {
|
||||||
action,
|
|
||||||
{ dispatch, unsubscribe, cancelActiveListeners }
|
|
||||||
) => {
|
|
||||||
// Only run this listener on the first listImages request for no-board images
|
// Only run this listener on the first listImages request for no-board images
|
||||||
if (
|
if (action.meta.arg.queryCacheKey !== getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })) {
|
||||||
action.meta.arg.queryCacheKey !==
|
|
||||||
getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })
|
|
||||||
) {
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,8 +1,5 @@
|
|||||||
import { setInfillMethod } from 'features/parameters/store/generationSlice';
|
import { setInfillMethod } from 'features/parameters/store/generationSlice';
|
||||||
import {
|
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
|
||||||
shouldUseNSFWCheckerChanged,
|
|
||||||
shouldUseWatermarkerChanged,
|
|
||||||
} from 'features/system/store/systemSlice';
|
|
||||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||||
|
|
||||||
import { startAppListening } from '..';
|
import { startAppListening } from '..';
|
||||||
@ -11,11 +8,7 @@ export const addAppConfigReceivedListener = () => {
|
|||||||
startAppListening({
|
startAppListening({
|
||||||
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
|
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
|
||||||
effect: async (action, { getState, dispatch }) => {
|
effect: async (action, { getState, dispatch }) => {
|
||||||
const {
|
const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
|
||||||
infill_methods = [],
|
|
||||||
nsfw_methods = [],
|
|
||||||
watermarking_methods = [],
|
|
||||||
} = action.payload;
|
|
||||||
const infillMethod = getState().generation.infillMethod;
|
const infillMethod = getState().generation.infillMethod;
|
||||||
|
|
||||||
if (!infill_methods.includes(infillMethod)) {
|
if (!infill_methods.includes(infillMethod)) {
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui';
|
import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import { parseify } from 'common/util/serialize';
|
import { parseify } from 'common/util/serialize';
|
||||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||||
@ -20,10 +20,7 @@ export const addBatchEnqueuedListener = () => {
|
|||||||
effect: async (action) => {
|
effect: async (action) => {
|
||||||
const response = action.payload;
|
const response = action.payload;
|
||||||
const arg = action.meta.arg.originalArgs;
|
const arg = action.meta.arg.originalArgs;
|
||||||
logger('queue').debug(
|
logger('queue').debug({ enqueueResult: parseify(response) }, 'Batch enqueued');
|
||||||
{ enqueueResult: parseify(response) },
|
|
||||||
'Batch enqueued'
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!toast.isActive('batch-queued')) {
|
if (!toast.isActive('batch-queued')) {
|
||||||
toast({
|
toast({
|
||||||
@ -53,10 +50,7 @@ export const addBatchEnqueuedListener = () => {
|
|||||||
status: 'error',
|
status: 'error',
|
||||||
description: 'Unknown Error',
|
description: 'Unknown Error',
|
||||||
});
|
});
|
||||||
logger('queue').error(
|
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
|
||||||
{ batchConfig: parseify(arg), error: parseify(response) },
|
|
||||||
t('queue.batchFailedToQueue')
|
|
||||||
);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -81,10 +75,7 @@ export const addBatchEnqueuedListener = () => {
|
|||||||
status: 'error',
|
status: 'error',
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
logger('queue').error(
|
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
|
||||||
{ batchConfig: parseify(arg), error: parseify(response) },
|
|
||||||
t('queue.batchFailedToQueue')
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
@ -22,13 +22,7 @@ export const addDeleteBoardAndImagesFulfilledListener = () => {
|
|||||||
|
|
||||||
const { generation, canvas, nodes, controlAdapters } = getState();
|
const { generation, canvas, nodes, controlAdapters } = getState();
|
||||||
deleted_images.forEach((image_name) => {
|
deleted_images.forEach((image_name) => {
|
||||||
const imageUsage = getImageUsage(
|
const imageUsage = getImageUsage(generation, canvas, nodes, controlAdapters, image_name);
|
||||||
generation,
|
|
||||||
canvas,
|
|
||||||
nodes,
|
|
||||||
controlAdapters,
|
|
||||||
image_name
|
|
||||||
);
|
|
||||||
|
|
||||||
if (imageUsage.isInitialImage && !wasInitialImageReset) {
|
if (imageUsage.isInitialImage && !wasInitialImageReset) {
|
||||||
dispatch(clearInitialImage());
|
dispatch(clearInitialImage());
|
||||||
|
@ -1,13 +1,6 @@
|
|||||||
import { isAnyOf } from '@reduxjs/toolkit';
|
import { isAnyOf } from '@reduxjs/toolkit';
|
||||||
import {
|
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
|
||||||
boardIdSelected,
|
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
|
||||||
galleryViewChanged,
|
|
||||||
imageSelected,
|
|
||||||
} from 'features/gallery/store/gallerySlice';
|
|
||||||
import {
|
|
||||||
ASSETS_CATEGORIES,
|
|
||||||
IMAGE_CATEGORIES,
|
|
||||||
} from 'features/gallery/store/types';
|
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
import { imagesSelectors } from 'services/api/util';
|
import { imagesSelectors } from 'services/api/util';
|
||||||
|
|
||||||
@ -16,55 +9,38 @@ import { startAppListening } from '..';
|
|||||||
export const addBoardIdSelectedListener = () => {
|
export const addBoardIdSelectedListener = () => {
|
||||||
startAppListening({
|
startAppListening({
|
||||||
matcher: isAnyOf(boardIdSelected, galleryViewChanged),
|
matcher: isAnyOf(boardIdSelected, galleryViewChanged),
|
||||||
effect: async (
|
effect: async (action, { getState, dispatch, condition, cancelActiveListeners }) => {
|
||||||
action,
|
|
||||||
{ getState, dispatch, condition, cancelActiveListeners }
|
|
||||||
) => {
|
|
||||||
// Cancel any in-progress instances of this listener, we don't want to select an image from a previous board
|
// Cancel any in-progress instances of this listener, we don't want to select an image from a previous board
|
||||||
cancelActiveListeners();
|
cancelActiveListeners();
|
||||||
|
|
||||||
const state = getState();
|
const state = getState();
|
||||||
|
|
||||||
const board_id = boardIdSelected.match(action)
|
const board_id = boardIdSelected.match(action) ? action.payload.boardId : state.gallery.selectedBoardId;
|
||||||
? action.payload.boardId
|
|
||||||
: state.gallery.selectedBoardId;
|
|
||||||
|
|
||||||
const galleryView = galleryViewChanged.match(action)
|
const galleryView = galleryViewChanged.match(action) ? action.payload : state.gallery.galleryView;
|
||||||
? action.payload
|
|
||||||
: state.gallery.galleryView;
|
|
||||||
|
|
||||||
// when a board is selected, we need to wait until the board has loaded *some* images, then select the first one
|
// when a board is selected, we need to wait until the board has loaded *some* images, then select the first one
|
||||||
const categories =
|
const categories = galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;
|
||||||
galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;
|
|
||||||
|
|
||||||
const queryArgs = { board_id: board_id ?? 'none', categories };
|
const queryArgs = { board_id: board_id ?? 'none', categories };
|
||||||
|
|
||||||
// wait until the board has some images - maybe it already has some from a previous fetch
|
// wait until the board has some images - maybe it already has some from a previous fetch
|
||||||
// must use getState() to ensure we do not have stale state
|
// must use getState() to ensure we do not have stale state
|
||||||
const isSuccess = await condition(
|
const isSuccess = await condition(
|
||||||
() =>
|
() => imagesApi.endpoints.listImages.select(queryArgs)(getState()).isSuccess,
|
||||||
imagesApi.endpoints.listImages.select(queryArgs)(getState())
|
|
||||||
.isSuccess,
|
|
||||||
5000
|
5000
|
||||||
);
|
);
|
||||||
|
|
||||||
if (isSuccess) {
|
if (isSuccess) {
|
||||||
// the board was just changed - we can select the first image
|
// the board was just changed - we can select the first image
|
||||||
const { data: boardImagesData } =
|
const { data: boardImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(getState());
|
||||||
imagesApi.endpoints.listImages.select(queryArgs)(getState());
|
|
||||||
|
|
||||||
if (
|
if (boardImagesData && boardIdSelected.match(action) && action.payload.selectedImageName) {
|
||||||
boardImagesData &&
|
const selectedImage = imagesSelectors.selectById(boardImagesData, action.payload.selectedImageName);
|
||||||
boardIdSelected.match(action) &&
|
dispatch(imageSelected(selectedImage || null));
|
||||||
action.payload.selectedImageName
|
} else if (boardImagesData) {
|
||||||
) {
|
|
||||||
const firstImage = imagesSelectors.selectAll(boardImagesData)[0];
|
const firstImage = imagesSelectors.selectAll(boardImagesData)[0];
|
||||||
const selectedImage = imagesSelectors.selectById(
|
dispatch(imageSelected(firstImage || null));
|
||||||
boardImagesData,
|
|
||||||
action.payload.selectedImageName
|
|
||||||
);
|
|
||||||
|
|
||||||
dispatch(imageSelected(selectedImage || firstImage || null));
|
|
||||||
} else {
|
} else {
|
||||||
// board has no images - deselect
|
// board has no images - deselect
|
||||||
dispatch(imageSelected(null));
|
dispatch(imageSelected(null));
|
||||||
|
@ -11,9 +11,7 @@ export const addCanvasCopiedToClipboardListener = () => {
|
|||||||
startAppListening({
|
startAppListening({
|
||||||
actionCreator: canvasCopiedToClipboard,
|
actionCreator: canvasCopiedToClipboard,
|
||||||
effect: async (action, { dispatch, getState }) => {
|
effect: async (action, { dispatch, getState }) => {
|
||||||
const moduleLog = $logger
|
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
|
||||||
.get()
|
|
||||||
.child({ namespace: 'canvasCopiedToClipboardListener' });
|
|
||||||
const state = getState();
|
const state = getState();
|
||||||
|
|
||||||
try {
|
try {
|
||||||
|
@ -11,9 +11,7 @@ export const addCanvasDownloadedAsImageListener = () => {
|
|||||||
startAppListening({
|
startAppListening({
|
||||||
actionCreator: canvasDownloadedAsImage,
|
actionCreator: canvasDownloadedAsImage,
|
||||||
effect: async (action, { dispatch, getState }) => {
|
effect: async (action, { dispatch, getState }) => {
|
||||||
const moduleLog = $logger
|
const moduleLog = $logger.get().child({ namespace: 'canvasSavedToGalleryListener' });
|
||||||
.get()
|
|
||||||
.child({ namespace: 'canvasSavedToGalleryListener' });
|
|
||||||
const state = getState();
|
const state = getState();
|
||||||
|
|
||||||
let blob;
|
let blob;
|
||||||
@ -32,9 +30,7 @@ export const addCanvasDownloadedAsImageListener = () => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
downloadBlob(blob, 'canvas.png');
|
downloadBlob(blob, 'canvas.png');
|
||||||
dispatch(
|
dispatch(addToast({ title: t('toast.canvasDownloaded'), status: 'success' }));
|
||||||
addToast({ title: t('toast.canvasDownloaded'), status: 'success' })
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
@ -13,9 +13,7 @@ export const addCanvasMergedListener = () => {
|
|||||||
startAppListening({
|
startAppListening({
|
||||||
actionCreator: canvasMerged,
|
actionCreator: canvasMerged,
|
||||||
effect: async (action, { dispatch }) => {
|
effect: async (action, { dispatch }) => {
|
||||||
const moduleLog = $logger
|
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
|
||||||
.get()
|
|
||||||
.child({ namespace: 'canvasCopiedToClipboardListener' });
|
|
||||||
const blob = await getFullBaseLayerBlob();
|
const blob = await getFullBaseLayerBlob();
|
||||||
|
|
||||||
if (!blob) {
|
if (!blob) {
|
||||||
|
@ -21,11 +21,7 @@ type AnyControlAdapterParamChangeAction =
|
|||||||
| ReturnType<typeof controlAdapterProcessortTypeChanged>
|
| ReturnType<typeof controlAdapterProcessortTypeChanged>
|
||||||
| ReturnType<typeof controlAdapterAutoConfigToggled>;
|
| ReturnType<typeof controlAdapterAutoConfigToggled>;
|
||||||
|
|
||||||
const predicate: AnyListenerPredicate<RootState> = (
|
const predicate: AnyListenerPredicate<RootState> = (action, state, prevState) => {
|
||||||
action,
|
|
||||||
state,
|
|
||||||
prevState
|
|
||||||
) => {
|
|
||||||
const isActionMatched =
|
const isActionMatched =
|
||||||
controlAdapterProcessorParamsChanged.match(action) ||
|
controlAdapterProcessorParamsChanged.match(action) ||
|
||||||
controlAdapterModelChanged.match(action) ||
|
controlAdapterModelChanged.match(action) ||
|
||||||
@ -40,12 +36,7 @@ const predicate: AnyListenerPredicate<RootState> = (
|
|||||||
const { id } = action.payload;
|
const { id } = action.payload;
|
||||||
const prevCA = selectControlAdapterById(prevState.controlAdapters, id);
|
const prevCA = selectControlAdapterById(prevState.controlAdapters, id);
|
||||||
const ca = selectControlAdapterById(state.controlAdapters, id);
|
const ca = selectControlAdapterById(state.controlAdapters, id);
|
||||||
if (
|
if (!prevCA || !isControlNetOrT2IAdapter(prevCA) || !ca || !isControlNetOrT2IAdapter(ca)) {
|
||||||
!prevCA ||
|
|
||||||
!isControlNetOrT2IAdapter(prevCA) ||
|
|
||||||
!ca ||
|
|
||||||
!isControlNetOrT2IAdapter(ca)
|
|
||||||
) {
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -64,37 +64,28 @@ export const addControlNetImageProcessedListener = () => {
|
|||||||
);
|
);
|
||||||
const enqueueResult = await req.unwrap();
|
const enqueueResult = await req.unwrap();
|
||||||
req.reset();
|
req.reset();
|
||||||
log.debug(
|
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
|
||||||
{ enqueueResult: parseify(enqueueResult) },
|
|
||||||
t('queue.graphQueued')
|
|
||||||
);
|
|
||||||
|
|
||||||
const [invocationCompleteAction] = await take(
|
const [invocationCompleteAction] = await take(
|
||||||
(action): action is ReturnType<typeof socketInvocationComplete> =>
|
(action): action is ReturnType<typeof socketInvocationComplete> =>
|
||||||
socketInvocationComplete.match(action) &&
|
socketInvocationComplete.match(action) &&
|
||||||
action.payload.data.queue_batch_id ===
|
action.payload.data.queue_batch_id === enqueueResult.batch.batch_id &&
|
||||||
enqueueResult.batch.batch_id &&
|
|
||||||
action.payload.data.source_node_id === nodeId
|
action.payload.data.source_node_id === nodeId
|
||||||
);
|
);
|
||||||
|
|
||||||
// We still have to check the output type
|
// We still have to check the output type
|
||||||
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
|
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
|
||||||
const { image_name } =
|
const { image_name } = invocationCompleteAction.payload.data.result.image;
|
||||||
invocationCompleteAction.payload.data.result.image;
|
|
||||||
|
|
||||||
// Wait for the ImageDTO to be received
|
// Wait for the ImageDTO to be received
|
||||||
const [{ payload }] = await take(
|
const [{ payload }] = await take(
|
||||||
(action) =>
|
(action) =>
|
||||||
imagesApi.endpoints.getImageDTO.matchFulfilled(action) &&
|
imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
|
||||||
action.payload.image_name === image_name
|
|
||||||
);
|
);
|
||||||
|
|
||||||
const processedControlImage = payload as ImageDTO;
|
const processedControlImage = payload as ImageDTO;
|
||||||
|
|
||||||
log.debug(
|
log.debug({ controlNetId: action.payload, processedControlImage }, 'ControlNet image processed');
|
||||||
{ controlNetId: action.payload, processedControlImage },
|
|
||||||
'ControlNet image processed'
|
|
||||||
);
|
|
||||||
|
|
||||||
// Update the processed image in the store
|
// Update the processed image in the store
|
||||||
dispatch(
|
dispatch(
|
||||||
@ -105,10 +96,7 @@ export const addControlNetImageProcessedListener = () => {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log.error(
|
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
|
||||||
{ enqueueBatchArg: parseify(enqueueBatchArg) },
|
|
||||||
t('queue.graphFailedToQueue')
|
|
||||||
);
|
|
||||||
|
|
||||||
if (error instanceof Object) {
|
if (error instanceof Object) {
|
||||||
if ('data' in error && 'status' in error) {
|
if ('data' in error && 'status' in error) {
|
||||||
|
@ -2,10 +2,7 @@ import { logger } from 'app/logging/logger';
|
|||||||
import { enqueueRequested } from 'app/store/actions';
|
import { enqueueRequested } from 'app/store/actions';
|
||||||
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
|
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
|
||||||
import { parseify } from 'common/util/serialize';
|
import { parseify } from 'common/util/serialize';
|
||||||
import {
|
import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
|
||||||
canvasBatchIdAdded,
|
|
||||||
stagingAreaInitialized,
|
|
||||||
} from 'features/canvas/store/canvasSlice';
|
|
||||||
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
|
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
|
||||||
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
||||||
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
|
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
|
||||||
@ -34,20 +31,14 @@ import { startAppListening } from '..';
|
|||||||
export const addEnqueueRequestedCanvasListener = () => {
|
export const addEnqueueRequestedCanvasListener = () => {
|
||||||
startAppListening({
|
startAppListening({
|
||||||
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
||||||
enqueueRequested.match(action) &&
|
enqueueRequested.match(action) && action.payload.tabName === 'unifiedCanvas',
|
||||||
action.payload.tabName === 'unifiedCanvas',
|
|
||||||
effect: async (action, { getState, dispatch }) => {
|
effect: async (action, { getState, dispatch }) => {
|
||||||
const log = logger('queue');
|
const log = logger('queue');
|
||||||
const { prepend } = action.payload;
|
const { prepend } = action.payload;
|
||||||
const state = getState();
|
const state = getState();
|
||||||
|
|
||||||
const {
|
const { layerState, boundingBoxCoordinates, boundingBoxDimensions, isMaskEnabled, shouldPreserveMaskedArea } =
|
||||||
layerState,
|
state.canvas;
|
||||||
boundingBoxCoordinates,
|
|
||||||
boundingBoxDimensions,
|
|
||||||
isMaskEnabled,
|
|
||||||
shouldPreserveMaskedArea,
|
|
||||||
} = state.canvas;
|
|
||||||
|
|
||||||
// Build canvas blobs
|
// Build canvas blobs
|
||||||
const canvasBlobsAndImageData = await getCanvasData(
|
const canvasBlobsAndImageData = await getCanvasData(
|
||||||
@ -63,14 +54,10 @@ export const addEnqueueRequestedCanvasListener = () => {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { baseBlob, baseImageData, maskBlob, maskImageData } =
|
const { baseBlob, baseImageData, maskBlob, maskImageData } = canvasBlobsAndImageData;
|
||||||
canvasBlobsAndImageData;
|
|
||||||
|
|
||||||
// Determine the generation mode
|
// Determine the generation mode
|
||||||
const generationMode = getCanvasGenerationMode(
|
const generationMode = getCanvasGenerationMode(baseImageData, maskImageData);
|
||||||
baseImageData,
|
|
||||||
maskImageData
|
|
||||||
);
|
|
||||||
|
|
||||||
if (state.system.enableImageDebugging) {
|
if (state.system.enableImageDebugging) {
|
||||||
const baseDataURL = await blobToDataURL(baseBlob);
|
const baseDataURL = await blobToDataURL(baseBlob);
|
||||||
@ -115,12 +102,7 @@ export const addEnqueueRequestedCanvasListener = () => {
|
|||||||
).unwrap();
|
).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
const graph = buildCanvasGraph(
|
const graph = buildCanvasGraph(state, generationMode, canvasInitImage, canvasMaskImage);
|
||||||
state,
|
|
||||||
generationMode,
|
|
||||||
canvasInitImage,
|
|
||||||
canvasMaskImage
|
|
||||||
);
|
|
||||||
|
|
||||||
log.debug({ graph: parseify(graph) }, `Canvas graph built`);
|
log.debug({ graph: parseify(graph) }, `Canvas graph built`);
|
||||||
|
|
||||||
|
@ -11,9 +11,7 @@ import { startAppListening } from '..';
|
|||||||
export const addEnqueueRequestedLinear = () => {
|
export const addEnqueueRequestedLinear = () => {
|
||||||
startAppListening({
|
startAppListening({
|
||||||
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
||||||
enqueueRequested.match(action) &&
|
enqueueRequested.match(action) && (action.payload.tabName === 'txt2img' || action.payload.tabName === 'img2img'),
|
||||||
(action.payload.tabName === 'txt2img' ||
|
|
||||||
action.payload.tabName === 'img2img'),
|
|
||||||
effect: async (action, { getState, dispatch }) => {
|
effect: async (action, { getState, dispatch }) => {
|
||||||
const state = getState();
|
const state = getState();
|
||||||
const model = state.generation.model;
|
const model = state.generation.model;
|
||||||
|
@ -32,8 +32,7 @@ export const addGalleryImageClickedListener = () => {
|
|||||||
const { imageDTO, shiftKey, ctrlKey, metaKey } = action.payload;
|
const { imageDTO, shiftKey, ctrlKey, metaKey } = action.payload;
|
||||||
const state = getState();
|
const state = getState();
|
||||||
const queryArgs = selectListImagesQueryArgs(state);
|
const queryArgs = selectListImagesQueryArgs(state);
|
||||||
const { data: listImagesData } =
|
const { data: listImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(state);
|
||||||
imagesApi.endpoints.listImages.select(queryArgs)(state);
|
|
||||||
|
|
||||||
if (!listImagesData) {
|
if (!listImagesData) {
|
||||||
// Should never happen if we have clicked a gallery image
|
// Should never happen if we have clicked a gallery image
|
||||||
@ -46,12 +45,8 @@ export const addGalleryImageClickedListener = () => {
|
|||||||
if (shiftKey) {
|
if (shiftKey) {
|
||||||
const rangeEndImageName = imageDTO.image_name;
|
const rangeEndImageName = imageDTO.image_name;
|
||||||
const lastSelectedImage = selection[selection.length - 1]?.image_name;
|
const lastSelectedImage = selection[selection.length - 1]?.image_name;
|
||||||
const lastClickedIndex = imageDTOs.findIndex(
|
const lastClickedIndex = imageDTOs.findIndex((n) => n.image_name === lastSelectedImage);
|
||||||
(n) => n.image_name === lastSelectedImage
|
const currentClickedIndex = imageDTOs.findIndex((n) => n.image_name === rangeEndImageName);
|
||||||
);
|
|
||||||
const currentClickedIndex = imageDTOs.findIndex(
|
|
||||||
(n) => n.image_name === rangeEndImageName
|
|
||||||
);
|
|
||||||
if (lastClickedIndex > -1 && currentClickedIndex > -1) {
|
if (lastClickedIndex > -1 && currentClickedIndex > -1) {
|
||||||
// We have a valid range!
|
// We have a valid range!
|
||||||
const start = Math.min(lastClickedIndex, currentClickedIndex);
|
const start = Math.min(lastClickedIndex, currentClickedIndex);
|
||||||
@ -60,15 +55,8 @@ export const addGalleryImageClickedListener = () => {
|
|||||||
dispatch(selectionChanged(selection.concat(imagesToSelect)));
|
dispatch(selectionChanged(selection.concat(imagesToSelect)));
|
||||||
}
|
}
|
||||||
} else if (ctrlKey || metaKey) {
|
} else if (ctrlKey || metaKey) {
|
||||||
if (
|
if (selection.some((i) => i.image_name === imageDTO.image_name) && selection.length > 1) {
|
||||||
selection.some((i) => i.image_name === imageDTO.image_name) &&
|
dispatch(selectionChanged(selection.filter((n) => n.image_name !== imageDTO.image_name)));
|
||||||
selection.length > 1
|
|
||||||
) {
|
|
||||||
dispatch(
|
|
||||||
selectionChanged(
|
|
||||||
selection.filter((n) => n.image_name !== imageDTO.image_name)
|
|
||||||
)
|
|
||||||
);
|
|
||||||
} else {
|
} else {
|
||||||
dispatch(selectionChanged(selection.concat(imageDTO)));
|
dispatch(selectionChanged(selection.concat(imageDTO)));
|
||||||
}
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user